Posted to hdfs-dev@hadoop.apache.org by Apache Hudson Server <hu...@hudson.apache.org> on 2011/03/23 13:55:35 UTC

Hadoop-Hdfs-trunk - Build # 615 - Still Failing

See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/615/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 6 lines...]
java.lang.NullPointerException
	at hudson.tasks.JavadocArchiver.perform(JavadocArchiver.java:94)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:19)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Archiving artifacts
Recording test results
ERROR: Publisher hudson.tasks.junit.JUnitResultArchiver aborted due to exception
java.lang.NullPointerException
	at hudson.tasks.junit.JUnitParser.parse(JUnitParser.java:83)
	at hudson.tasks.junit.JUnitResultArchiver.parse(JUnitResultArchiver.java:123)
	at hudson.tasks.junit.JUnitResultArchiver.perform(JUnitResultArchiver.java:135)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:19)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Recording fingerprints
ERROR: Unable to record fingerprints because there's no workspace
ERROR: Publisher hudson.plugins.violations.ViolationsPublisher aborted due to exception
java.lang.NullPointerException
	at hudson.plugins.violations.ViolationsPublisher.perform(ViolationsPublisher.java:74)
	at hudson.tasks.BuildStepMonitor$3.perform(BuildStepMonitor.java:36)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
ERROR: Publisher hudson.plugins.clover.CloverPublisher aborted due to exception
java.lang.NullPointerException
	at hudson.plugins.clover.CloverPublisher.perform(CloverPublisher.java:137)
	at hudson.tasks.BuildStepMonitor$3.perform(BuildStepMonitor.java:36)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 642 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/642/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 730857 lines...]
    [junit] 2011-04-19 12:21:35,904 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:44943, storageID=DS-698805625-127.0.1.1-44943-1303215695276, infoPort=45861, ipcPort=41554):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-19 12:21:35,905 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41554
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-19 12:21:35,906 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-19 12:21:35,906 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 39285
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 39285: exiting
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 39285
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-19 12:21:36,007 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-19 12:21:36,007 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:49559, storageID=DS-1905199131-127.0.1.1-49559-1303215695148, infoPort=49257, ipcPort=39285):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-19 12:21:36,009 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:49559, storageID=DS-1905199131-127.0.1.1-49559-1303215695148, infoPort=49257, ipcPort=39285):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-19 12:21:36,110 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 39285
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-19 12:21:36,111 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-19 12:21:36,212 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-19 12:21:36,212 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4
    [junit] 2011-04-19 12:21:36,212 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2896)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-19 12:21:36,213 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55002
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55002: exiting
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55002
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 97.315 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 49 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_17

Error Message:
Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:50271], original=[127.0.0.1:50271]

Stack Trace:
java.io.IOException: Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:50271], original=[127.0.0.1:50271]
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.findNewDatanode(DFSOutputStream.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.addDatanode2ExistingPipeline(DFSOutputStream.java:824)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:918)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:731)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:415)
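
This failure comes from the pipeline-recovery path visible in the trace: after a datanode fault, the client's DataStreamer tries to add a replacement datanode and then verifies that the rebuilt pipeline is exactly one node longer than the original. A minimal sketch of that invariant, reconstructed from the exception message rather than the exact Hadoop source (the class and helper names below are illustrative):

    import java.io.IOException;
    import java.util.Arrays;

    // Sketch of the check behind the "Failed to add a datanode" message
    // above. When no replacement datanode is available, the rebuilt
    // pipeline has the same length as the original and the check throws.
    class PipelineRecoverySketch {
      static void checkReplacement(String[] nodes, String[] original) throws IOException {
        if (nodes.length != original.length + 1) {
          throw new IOException("Failed to add a datanode: "
              + "nodes.length != original.length + 1, nodes=" + Arrays.asList(nodes)
              + ", original=" + Arrays.asList(original));
        }
      }
    }

In the log, nodes and original are both [127.0.0.1:50271], i.e. no new node was found, which is exactly the case this check rejects.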


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182xp1(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
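
The assertion text follows JUnit's assertEquals(message, expected, actual) format; a minimal sketch of such a check (the counter name is hypothetical, not the test's actual variable):

    // Illustrative JUnit 3 assertion matching the failure message above;
    // 'pendingReplicationCount' stands in for a value read from the namenode.
    assertEquals("Wrong number of PendingReplication blocks", 2, pendingReplicationCount);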




Hadoop-Hdfs-trunk - Build # 641 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/641/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 711231 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-40822 / http-40823 / https-40824
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:40823
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.481 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.333 sec
   [cactus] Tomcat 5.x started on port [40823]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.329 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.318 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.859 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 62 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
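
Both failures are client-side Cactus assertions: endPathPermit and endPathPermitQualified run on the client after the in-container request completes, and each expected the authorization filter to reject with HTTP 403 where the proxy actually answered 200. A minimal sketch of that Cactus pattern (the class name and request setup are illustrative, not the actual test source):

    import org.apache.cactus.ServletTestCase;
    import org.apache.cactus.WebResponse;

    // Sketch of a Cactus test pair like those in the traces above. Cactus
    // runs testXXX() inside the servlet container, then calls the matching
    // endXXX(WebResponse) on the client to assert on the HTTP response.
    public class AuthorizationFilterSketch extends ServletTestCase {
      public void testPathPermit() {
        // In-container: exercise the filter for a path the caller should
        // not be permitted to read (request setup elided).
      }

      public void endPathPermit(WebResponse theResponse) {
        // Expected 403 Forbidden from the filter; the failing builds got
        // 200, meaning the request was let through.
        assertEquals(403, theResponse.getStatusCode());
      }
    }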




Hadoop-Hdfs-trunk - Build # 640 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/640/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 722627 lines...]
    [junit] 
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:53934, storageID=DS-1753167764-127.0.1.1-53934-1303043703615, infoPort=45352, ipcPort=33069):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-17 12:35:04,372 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 33069
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-17 12:35:04,372 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-17 12:35:04,373 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-17 12:35:04,473 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46160
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 46160: exiting
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 46160
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-17 12:35:04,474 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:45883, storageID=DS-899432502-127.0.1.1-45883-1303043703453, infoPort=52177, ipcPort=46160):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-17 12:35:04,474 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-17 12:35:04,474 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:45883, storageID=DS-899432502-127.0.1.1-45883-1303043703453, infoPort=52177, ipcPort=46160):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-17 12:35:04,575 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46160
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-17 12:35:04,575 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-17 12:35:04,676 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-17 12:35:04,676 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2896)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-17 12:35:04,676 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 0 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 40108
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 40108: exiting
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 40108
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 99.107 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 61 minutes 40 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_18

Error Message:
Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:54748], original=[127.0.0.1:54748]

Stack Trace:
java.io.IOException: Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:54748], original=[127.0.0.1:54748]
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.findNewDatanode(DFSOutputStream.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.addDatanode2ExistingPipeline(DFSOutputStream.java:824)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:918)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:731)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:415)




Hadoop-Hdfs-trunk - Build # 639 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/639/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1819 lines...]
    [javac]                                              ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java:93: cannot find symbol
    [javac] symbol  : class TestCmd
    [javac] location: class org.apache.hadoop.cli.TestHDFSCLI
    [javac]   protected Result execute(TestCmd cmd) throws Exception {
    [javac]                            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:32: cannot find symbol
    [javac] symbol  : variable DFSADMIN
    [javac] location: class org.apache.hadoop.cli.CmdFactoryDFS
    [javac]       case DFSADMIN:
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:33: package CLICommands does not exist
    [javac]         executor = new CLICommands.FSCmdExecutor(tag, new DFSAdmin());
    [javac]                                   ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:36: cannot find symbol
    [javac] symbol  : variable CmdFactory
    [javac] location: class org.apache.hadoop.cli.CmdFactoryDFS
    [javac]         executor = CmdFactory.getCommandExecutor(cmd, tag);
    [javac]                    ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java:355: cannot find symbol
    [javac] symbol  : class TestCmd
    [javac] location: class org.apache.hadoop.cli.util.CLITestData
    [javac]           new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
    [javac]                          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java:355: cannot find symbol
    [javac] symbol  : variable TestCmd
    [javac] location: class org.apache.hadoop.cli.util.CLITestData
    [javac]           new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
    [javac]                                                   ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 11 errors

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:412: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:446: Compile failed; see the compiler error output for details.

Total time: 44 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 638 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/638/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713453 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-44211 / http-44212 / https-44213
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:44212
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.459 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.31 sec
   [cactus] Tomcat 5.x started on port [44212]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.324 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.33 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.867 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 52 minutes 0 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 637 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/637/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 695403 lines...]
    [junit] 
    [junit] 2011-04-14 12:25:10,208 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-14 12:25:10,208 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-14 12:25:10,208 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:53070, storageID=DS-1559300299-127.0.1.1-53070-1302783909591, infoPort=59092, ipcPort=35341):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-14 12:25:10,209 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35341
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-14 12:25:10,209 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-14 12:25:10,210 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-14 12:25:10,310 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 53309
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 53309: exiting
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 53309
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-14 12:25:10,311 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-14 12:25:10,311 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:55962, storageID=DS-1033477654-127.0.1.1-55962-1302783909440, infoPort=41539, ipcPort=53309):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-14 12:25:10,313 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,314 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:55962, storageID=DS-1033477654-127.0.1.1-55962-1302783909440, infoPort=41539, ipcPort=53309):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-14 12:25:10,315 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 53309
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-14 12:25:10,315 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-14 12:25:10,427 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2908)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-14 12:25:10,427 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-14 12:25:10,427 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3
    [junit] 2011-04-14 12:25:10,429 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60224
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 60224: exiting
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 60224
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 98.453 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 51 minutes 48 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed

Hadoop-Hdfs-trunk - Build # 636 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/636/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 715049 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-13 12:49:23,947 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:43506, storageID=DS-1486568985-127.0.1.1-43506-1302698963166, infoPort=48490, ipcPort=54645):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-13 12:49:23,948 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 54645
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-13 12:49:23,949 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-13 12:49:23,949 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-13 12:49:23,949 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-13 12:49:23,950 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35691
    [junit] 2011-04-13 12:49:23,950 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 35691: exiting
    [junit] 2011-04-13 12:49:23,951 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 35691
    [junit] 2011-04-13 12:49:23,951 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-13 12:49:23,951 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:50046, storageID=DS-591968703-127.0.1.1-50046-1302698963017, infoPort=48358, ipcPort=35691):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-13 12:49:23,951 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:50046, storageID=DS-591968703-127.0.1.1-50046-1302698963017, infoPort=48358, ipcPort=35691):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-13 12:49:23,952 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35691
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-13 12:49:23,953 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-13 12:49:23,953 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-13 12:49:24,054 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2908)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-13 12:49:24,054 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-13 12:49:24,054 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 0 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 6
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60243
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 60243: exiting
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 60243
    [junit] 2011-04-13 12:49:24,057 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 99.675 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 76 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.fs.TestHDFSFileContextMainOperations.testCreateFlagAppendExistingFile

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.




Hadoop-Hdfs-trunk - Build # 635 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/635/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 733846 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-25446 / http-25447 / https-25448
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:25447
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.472 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.352 sec
   [cactus] Tomcat 5.x started on port [25447]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.33 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.333 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.858 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 60 minutes 21 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
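Both TestAuthorizationFilter failures are client-side Cactus assertions on the HTTP status code: the authorization filter is expected to reject the request with 403 Forbidden, but the proxy answered 200 OK. A minimal sketch of the kind of end-method involved (assertEquals and org.apache.cactus.WebResponse.getStatusCode() are real JUnit 3 / Cactus APIs; the method body below is an illustrative assumption, not the actual test source):

    // Cactus invokes this on the client side after the server-side request
    // completes, passing the HTTP response. JUnit 3's assertEquals(int, int)
    // is what renders the "expected:<403> but was:<200>" message seen above.
    public void endPathPermit(org.apache.cactus.WebResponse response) {
        // A request outside the permitted path set should be denied with
        // 403 Forbidden; the failing builds got 200 OK instead.
        assertEquals(403, response.getStatusCode());
    }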




Hadoop-Hdfs-trunk - Build # 634 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/634/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 719226 lines...]
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-11 12:23:49,932 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-11 12:23:49,932 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:47347, storageID=DS-582507416-127.0.1.1-47347-1302524629514, infoPort=54226, ipcPort=44767):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-11 12:23:49,933 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44767
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-11 12:23:49,933 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-11 12:23:49,934 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-11 12:23:50,034 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48638
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 48638: exiting
    [junit] 2011-04-11 12:23:50,035 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 48638
    [junit] 2011-04-11 12:23:50,035 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:34265, storageID=DS-1376291308-127.0.1.1-34265-1302524629363, infoPort=47209, ipcPort=48638):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-11 12:23:50,037 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:34265, storageID=DS-1376291308-127.0.1.1-34265-1302524629363, infoPort=47209, ipcPort=48638):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-11 12:23:50,038 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48638
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:50,039 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-11 12:23:50,039 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-11 12:23:50,039 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-11 12:23:50,140 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-11 12:23:50,140 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-11 12:23:50,140 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 4 
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47278
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 47278: exiting
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 47278
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 95.075 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 50 minutes 31 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182rgt(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
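blockReport_09 times out while polling for a replica to enter the TEMPORARY state on the DataNode, so this failure is a timing/race issue rather than a wrong result. A sketch of the bounded-wait pattern that produces this kind of message (the predicate and timeout below are assumptions for illustration; the real logic lives in TestBlockReport.waitForTempReplica):

    // Poll until the block appears in the TEMPORARY replica state, failing
    // once the deadline passes. Sketch of the pattern, not the Hadoop source.
    private void waitForTempReplica(long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!replicaIsTemporary()) {   // hypothetical state check
            if (System.currentTimeMillis() > deadline) {
                junit.framework.Assert.fail(
                    "Was waiting too long for a replica to become TEMPORARY");
            }
            Thread.sleep(100);            // back off between polls
        }
    }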




Hadoop-Hdfs-trunk - Build # 633 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/633/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 756184 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-54162 / http-54163 / https-54164
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:54163
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.5 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.352 sec
   [cactus] Tomcat 5.x started on port [54163]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.341 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.868 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 60 minutes 33 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 632 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/632/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 721228 lines...]
    [junit] 2011-04-09 12:23:33,036 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-09 12:23:33,036 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:33367, storageID=DS-2055774178-127.0.1.1-33367-1302351812614, infoPort=48133, ipcPort=32836):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-09 12:23:33,037 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 32836
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-09 12:23:33,037 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-09 12:23:33,038 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-09 12:23:33,138 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34146
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 34146: exiting
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34146
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-09 12:23:33,139 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-09 12:23:33,139 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:45214, storageID=DS-1160250199-127.0.1.1-45214-1302351812466, infoPort=56656, ipcPort=34146):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-09 12:23:33,141 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:45214, storageID=DS-1160250199-127.0.1.1-45214-1302351812466, infoPort=56656, ipcPort=34146):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-09 12:23:33,242 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34146
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-09 12:23:33,243 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-09 12:23:33,344 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-09 12:23:33,344 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-04-09 12:23:33,344 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-09 12:23:33,345 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34612
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 34612: exiting
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34612
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 94.844 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 50 minutes 16 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.runTest29_30(TestFiDataTransferProtocol2.java:153)
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29(TestFiDataTransferProtocol2.java:251)
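The "null" error message here is a JUnit 3 artifact rather than missing data: an assertion made without a message string throws an AssertionFailedError whose getMessage() is null, and the report prints that literally. An illustrative, self-contained example (not the Hadoop test itself):

    import junit.framework.TestCase;

    // Demonstrates why a failed assertion can appear as "Error Message: null".
    public class NullMessageExample extends TestCase {
        public void testWithoutMessage() {
            assertTrue(false);  // fails; AssertionFailedError message is null
        }
        public void testWithMessage() {
            assertTrue("pipeline did not recover", false);  // message is shown
        }
    }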




Hadoop-Hdfs-trunk - Build # 631 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/631/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 736597 lines...]
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-08 12:22:13,792 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,792 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:35209, storageID=DS-1023019400-127.0.1.1-35209-1302265323192, infoPort=53571, ipcPort=35154):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-08 12:22:13,793 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35154
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-08 12:22:13,794 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-08 12:22:13,794 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-08 12:22:13,794 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52755
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 52755: exiting
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-08 12:22:13,896 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-08 12:22:13,897 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:60366, storageID=DS-1346925635-127.0.1.1-60366-1302265323017, infoPort=51510, ipcPort=52755):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-08 12:22:13,897 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 52755
    [junit] 2011-04-08 12:22:13,899 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,999 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-08 12:22:13,999 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:60366, storageID=DS-1346925635-127.0.1.1-60366-1302265323017, infoPort=51510, ipcPort=52755):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-08 12:22:14,000 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52755
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-08 12:22:14,001 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-08 12:22:14,102 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-08 12:22:14,103 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 4 
    [junit] 2011-04-08 12:22:14,103 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-08 12:22:14,104 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55573
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55573: exiting
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55573
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.623 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 59 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testCount

Error Message:
not supposed to get here

Stack Trace:
java.lang.RuntimeException: not supposed to get here
	at org.apache.hadoop.fs.shell.FsCommand.run(FsCommand.java:51)
	at org.apache.hadoop.fs.shell.Command.runAll(Command.java:100)
	at org.apache.hadoop.hdfs.TestDFSShell.runCount(TestDFSShell.java:737)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2xc567w1396(TestDFSShell.java:705)
	at org.apache.hadoop.hdfs.TestDFSShell.testCount(TestDFSShell.java:694)
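The trace shows FsCommand.run throwing RuntimeException("not supposed to get here"): a defensive guard on a code path the shell-command framework treats as unreachable, which testCount evidently still reaches. A sketch of that guard pattern (class name and signature are illustrative assumptions, not the actual FsCommand source):

    // Fail loudly when a supposedly dead entry point is invoked, instead of
    // silently running stale logic. Illustrative sketch only.
    class LegacyEntryPoint {
        int run(String[] argv) {
            throw new RuntimeException("not supposed to get here");
        }
    }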




Hadoop-Hdfs-trunk - Build # 630 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/630/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 716427 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-36789 / http-36790 / https-36791
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:36790
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.483 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Tomcat 5.x started on port [36790]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.353 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.871 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 59 minutes 36 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 629 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/629/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 721564 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-21331 / http-21332 / https-21333
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:21332
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.464 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.373 sec
   [cactus] Tomcat 5.x started on port [21332]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.315 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.342 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.823 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 49 minutes 11 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 628 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/628/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 739272 lines...]
    [junit] 	... 11 more
    [junit] 2011-04-05 12:22:42,478 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,478 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:60473, storageID=DS-2030027606-127.0.1.1-60473-1302006152029, infoPort=37469, ipcPort=48812):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-05 12:22:42,479 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48812
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-05 12:22:42,480 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-05 12:22:42,480 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-05 12:22:42,480 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-05 12:22:42,582 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46639
    [junit] 2011-04-05 12:22:42,582 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 46639: exiting
    [junit] 2011-04-05 12:22:42,583 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 46639
    [junit] 2011-04-05 12:22:42,583 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-05 12:22:42,583 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:48393, storageID=DS-1552435921-127.0.1.1-48393-1302006151861, infoPort=37777, ipcPort=46639):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-05 12:22:42,584 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,684 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:48393, storageID=DS-1552435921-127.0.1.1-48393-1302006151861, infoPort=37777, ipcPort=46639):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-05 12:22:42,685 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46639
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-05 12:22:42,686 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-05 12:22:42,788 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-05 12:22:42,788 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-05 12:22:42,788 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 58226
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 58226: exiting
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 58226
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.457 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 49 minutes 32 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.runTest29_30(TestFiDataTransferProtocol2.java:153)
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29(TestFiDataTransferProtocol2.java:251)




Hadoop-Hdfs-trunk - Build # 627 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/627/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 718791 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:57751, storageID=DS-1453852092-127.0.1.1-57751-1301921238223, infoPort=40641, ipcPort=44415):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-04 12:47:28,816 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44415
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-04 12:47:28,816 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-04 12:47:28,817 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-04 12:47:28,918 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48120
    [junit] 2011-04-04 12:47:28,918 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 48120: exiting
    [junit] 2011-04-04 12:47:28,932 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 48120
    [junit] 2011-04-04 12:47:28,933 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-04 12:47:28,933 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:56077, storageID=DS-244328423-127.0.1.1-56077-1301921238026, infoPort=60079, ipcPort=48120):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-04 12:47:28,933 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-04 12:47:29,033 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:56077, storageID=DS-244328423-127.0.1.1-56077-1301921238026, infoPort=60079, ipcPort=48120):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-04 12:47:29,034 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48120
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-04 12:47:29,035 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-04 12:47:29,035 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-04 12:47:29,137 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-04 12:47:29,137 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-04 12:47:29,137 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 11 2 
    [junit] 2011-04-04 12:47:29,139 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 45063
    [junit] 2011-04-04 12:47:29,139 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 45063: exiting
    [junit] 2011-04-04 12:47:29,140 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 45063
    [junit] 2011-04-04 12:47:29,140 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.543 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 73 minutes 42 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jrg8(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)
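
The mismatch above is the standard JUnit 3 failure format for assertEquals with a message prefix. A minimal illustrative sketch of the kind of check that renders this way; the class and variable names are hypothetical, not the actual TestBlockReport source:

    import junit.framework.TestCase;

    // junit.framework.Assert.assertEquals(String, long, long) renders a
    // failure as "<message> expected:<2> but was:<1>".
    public class PendingReplicationSketch extends TestCase {
      public void testPendingReplicationCount() {
        long expectedPending = 2;  // blocks the test expects in PendingReplication
        long actualPending = 1;    // count actually reported (assumed for the sketch)
        assertEquals("Wrong number of PendingReplication blocks",
                     expectedPending, actualPending);
      }
    }

The same pattern produces the blockReport_09 failure in build 626 below.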




Hadoop-Hdfs-trunk - Build # 626 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/626/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 723583 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:51460, storageID=DS-1312723792-127.0.1.1-51460-1301833398906, infoPort=42430, ipcPort=50455):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-03 12:23:29,427 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 50455
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-03 12:23:29,428 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-03 12:23:29,429 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-03 12:23:29,530 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55674
    [junit] 2011-04-03 12:23:29,530 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55674: exiting
    [junit] 2011-04-03 12:23:29,531 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55674
    [junit] 2011-04-03 12:23:29,531 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-03 12:23:29,531 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:52044, storageID=DS-2081569357-127.0.1.1-52044-1301833398732, infoPort=47230, ipcPort=55674):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-03 12:23:29,531 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-03 12:23:29,632 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-03 12:23:29,632 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:52044, storageID=DS-2081569357-127.0.1.1-52044-1301833398732, infoPort=47230, ipcPort=55674):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-03 12:23:29,632 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55674
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-03 12:23:29,634 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-03 12:23:29,645 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-03 12:23:29,645 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-03 12:23:29,645 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 3 
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 37535
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 37535: exiting
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 37535
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.37 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 49 minutes 53 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182rgt(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)




Hadoop-Hdfs-trunk - Build # 625 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/625/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713798 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-34955 / http-34956 / https-34957
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:34956
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.455 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Tomcat 5.x started on port [34956]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.336 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.316 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.861 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 49 minutes 34 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
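
Both failures are Apache Cactus in-container tests: the test method runs inside Tomcat, and the client-side end method inspects the HTTP response afterwards (the callEndMethod frames above). A rough sketch of that pattern, assuming the usual ServletTestCase shape; the method bodies and names are illustrative, not the actual TestAuthorizationFilter source:

    import org.apache.cactus.ServletTestCase;
    import org.apache.cactus.WebResponse;

    public class AuthorizationFilterSketch extends ServletTestCase {
      public void testPathPermit() {
        // Server side: drive the authorization filter with a request for a
        // path the configured user should not be allowed to reach.
      }

      // Client side: Cactus calls endPathPermit with the captured response.
      public void endPathPermit(WebResponse response) {
        // A 200 here means the filter let the request through, which is what
        // produces "expected:<403> but was:<200>" above.
        assertEquals(403, response.getStatusCode());
      }
    }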




Hadoop-Hdfs-trunk - Build # 624 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/624/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 738293 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-25460 / http-25461 / https-25462
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:25461
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.489 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.492 sec
   [cactus] Tomcat 5.x started on port [25461]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.329 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.34 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.882 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 50 minutes 15 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 623 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/623/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 710777 lines...]
    [junit] 2011-03-31 12:33:18,494 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_8517475587862166522_1001 0 : Thread is interrupted.
    [junit] 2011-03-31 12:33:18,494 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2011-03-31 12:33:18,494 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-31 12:33:18,495 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_8517475587862166522_1001 terminating
    [junit] 2011-03-31 12:33:18,495 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740)
    [junit] 2011-03-31 12:33:18,496 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:360)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-31 12:33:18,497 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-31 12:33:18,598 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48740
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-31 12:33:18,599 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-31 12:33:18,599 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-31 12:33:18,599 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-31 12:33:18,701 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-31 12:33:18,701 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-31 12:33:18,701 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 59355
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 59355: exiting
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 59355
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.512 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 47 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 112047, r=ReplicaInPipeline, blk_5010047870379614353_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_5010047870379614353   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 112047, r=ReplicaInPipeline, blk_5010047870379614353_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_5010047870379614353
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1375)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgw(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)
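
The message shows the datanode's TEMPORARY replica holding fewer bytes (numBytes = 65536) than the length the client already considers visible, at the moment the test asks for the TEMPORARY-to-RBW conversion. A hedged reconstruction of the guard the stack trace points at, built only from the message format above; the real code throws ReplicaNotFoundException (an IOException subclass), and a plain IOException stands in here to keep the sketch self-contained:

    import java.io.IOException;

    class RbwConversionSketch {
      // Assumed shape of the check in a convertTemporaryToRbw-style method:
      // refuse the conversion while the replica lags the client-visible length.
      static void checkVisibleLength(long numBytes, long visible, String replica)
          throws IOException {
        if (numBytes < visible) {
          throw new IOException(
              numBytes + " = numBytes < visible = " + visible + ", r=" + replica);
        }
      }
    }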




Hadoop-Hdfs-trunk - Build # 622 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/622/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 698802 lines...]
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-4878957023449505870_1001 0 : Thread is interrupted.
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432)
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-4878957023449505870_1001 terminating
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432)
    [junit] 2011-03-30 12:22:26,215 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:360)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-30 12:22:26,217 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-30 12:22:26,317 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-30 12:22:26,317 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-30 12:22:26,317 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 57432
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-30 12:22:26,319 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-30 12:22:26,420 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-30 12:22:26,420 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-30 12:22:26,420 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2 
    [junit] 2011-03-30 12:22:26,422 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41503
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 41503: exiting
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 41503
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.721 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 57 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 130213, r=ReplicaInPipeline, blk_-2736525394384087704_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-2736525394384087704   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 130213, r=ReplicaInPipeline, blk_-2736525394384087704_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-2736525394384087704
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1375)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgv(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n131j(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)
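
This ComparisonFailure comes from JUnit 3's String comparison, which brackets the differing substrings of the two values. A minimal illustration using the values from the message above; the class and variable names are hypothetical:

    import junit.framework.TestCase;

    // junit.framework.ComparisonFailure highlights the differing portions in
    // brackets: "expected:<[reptiles]> but was:<[supergroup]>".
    public class OwnerCheckSketch extends TestCase {
      public void testGroupName() {
        String expectedGroup = "reptiles";   // group the test tries to set (assumed)
        String actualGroup = "supergroup";   // default group actually observed
        assertEquals(expectedGroup, actualGroup);
      }
    }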


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot1396(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk13a5(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613dt(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13es(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813g1(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)
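
The last four failures share one symptom: each test tries to format build/test/data/dfs/name1 and finds the storage directory still locked, which typically means an earlier case in the same JVM did not shut its MiniDFSCluster down (for instance after its own failure). A sketch of the defensive lifecycle pattern, using the Builder API visible in the stack traces; the configuration is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          // ... exercise shell commands against cluster.getFileSystem() ...
        } finally {
          // Always release the cluster, even on assertion failure, so the
          // name/data directory locks cannot leak into the next test case.
          cluster.shutdown();
        }
      }
    }

Build 621 below fails with the identical cascade.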




Hadoop-Hdfs-trunk - Build # 621 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/621/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 699203 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-29 12:32:37,695 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:53973, storageID=DS-777661655-127.0.1.1-53973-1301401947074, infoPort=47607, ipcPort=60453):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-29 12:32:37,696 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60453
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,698 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-29 12:32:37,698 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-29 12:32:37,698 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-29 12:32:37,699 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-29 12:32:37,800 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41601
    [junit] 2011-03-29 12:32:37,800 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 41601: exiting
    [junit] 2011-03-29 12:32:37,801 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 41601
    [junit] 2011-03-29 12:32:37,801 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-29 12:32:37,801 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:47815, storageID=DS-976453409-127.0.1.1-47815-1301401946890, infoPort=49694, ipcPort=41601):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-29 12:32:37,801 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,902 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-29 12:32:37,902 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:47815, storageID=DS-976453409-127.0.1.1-47815-1301401946890, infoPort=49694, ipcPort=41601):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-29 12:32:37,902 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41601
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-29 12:32:37,903 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-29 12:32:38,005 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-29 12:32:38,005 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 1 
    [junit] 2011-03-29 12:32:38,005 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-29 12:32:38,007 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 37641
    [junit] 2011-03-29 12:32:38,007 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 37641: exiting
    [junit] 2011-03-29 12:32:38,008 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 37641
    [junit] 2011-03-29 12:32:38,008 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.599 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 8 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 102717, r=ReplicaInPipeline, blk_-3557091731890250719_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-3557091731890250719   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 102717, r=ReplicaInPipeline, blk_-3557091731890250719_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-3557091731890250719
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1376)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgv(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n131j(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)
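
The expected:<[reptiles]> vs was:<[supergroup]> comparison comes from TestDFSShell.confirmOwner, which stats a path and asserts its owner/group. A self-contained sketch of that kind of check, under the assumption (not confirmed from the source here) that the test issued a chgrp to "reptiles" that never took effect, leaving the cluster's default group "supergroup":

    // Illustrative only: OwnerCheck and confirmGroup are hypothetical
    // stand-ins for TestDFSShell.confirmOwner; FileSystem/FileStatus/Assert
    // are the real APIs visible in the traces.
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import junit.framework.Assert;

    class OwnerCheck {
        static void confirmGroup(FileSystem fs, Path p, String expectedGroup)
                throws java.io.IOException {
            FileStatus status = fs.getFileStatus(p);               // stat the HDFS path
            Assert.assertEquals(expectedGroup, status.getGroup()); // "reptiles" vs "supergroup"
        }
    }

testFilePermissions below fails at the same confirmOwner line, so the two failures most likely share one root cause.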


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot1396(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk13a5(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)
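
"Cannot lock storage .../name1. The directory is already locked" is a different failure mode from the two above: the NameNode format inside MiniDFSCluster's constructor cannot take the lock on the name directory because a previous cluster instance still holds it, which is what happens when an earlier test in the class exits without shutting its cluster down. The remaining TestDFSShell failures below (testRemoteException, testGet, testLsr) are the same cascade. A hedged sketch of the lifecycle discipline that avoids leaking the lock (an illustrative wrapper, not a proposed patch):

    // Illustrative only: ClusterLifecycle/withCluster are hypothetical names;
    // MiniDFSCluster.Builder and shutdown() are the real APIs from the traces.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class ClusterLifecycle {
        static void withCluster() throws Exception {
            MiniDFSCluster cluster =
                new MiniDFSCluster.Builder(new Configuration()).build();
            try {
                // ... run shell commands against cluster.getFileSystem() ...
            } finally {
                cluster.shutdown();   // releases the storage lock even on test failure
            }
        }
    }

With the cluster torn down in a finally block, a failure in one test can no longer leave the name directory locked for every test that follows.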


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613dt(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13es(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813g1(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 620 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/620/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 690185 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-28 12:32:38,702 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,702 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:59927, storageID=DS-1180270094-127.0.1.1-59927-1301315548093, infoPort=44831, ipcPort=43258):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-28 12:32:38,703 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 43258
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-28 12:32:38,704 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-28 12:32:38,704 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-28 12:32:38,805 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47290
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 47290: exiting
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 47290
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-28 12:32:38,806 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:53628, storageID=DS-1160082990-127.0.1.1-53628-1301315547913, infoPort=32856, ipcPort=47290):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-28 12:32:38,806 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-28 12:32:38,907 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:53628, storageID=DS-1160082990-127.0.1.1-53628-1301315547913, infoPort=32856, ipcPort=47290):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-28 12:32:38,908 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47290
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-28 12:32:38,909 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-28 12:32:39,012 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-28 12:32:39,012 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-03-28 12:32:39,012 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-28 12:32:39,014 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 43074
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 43074: exiting
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 43074
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.6 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 5 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 79419, r=ReplicaInPipeline, blk_-6718005118883221936_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6718005118883221936   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 79419, r=ReplicaInPipeline, blk_-6718005118883221936_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6718005118883221936
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 619 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/619/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713643 lines...]
    [junit] 
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226)
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226)
    [junit] 2011-03-27 12:31:39,098 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-573293244434035474_1001 0 : Thread is interrupted.
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-573293244434035474_1001 terminating
    [junit] 2011-03-27 12:31:39,099 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:393)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-27 12:31:39,101 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-27 12:31:39,201 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-27 12:31:39,201 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-27 12:31:39,202 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41226
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-27 12:31:39,203 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-27 12:31:39,304 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-27 12:31:39,304 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 3 
    [junit] 2011-03-27 12:31:39,305 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-27 12:31:39,306 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 42780
    [junit] 2011-03-27 12:31:39,307 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 42780: exiting
    [junit] 2011-03-27 12:31:39,307 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 42780
    [junit] 2011-03-27 12:31:39,308 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.42 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 58 minutes 12 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 84255, r=ReplicaInPipeline, blk_-8473050276165535237_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 65536   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8473050276165535237   bytesAcked=0   bytesOnDisk=65536

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 84255, r=ReplicaInPipeline, blk_-8473050276165535237_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 65536
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8473050276165535237
  bytesAcked=0
  bytesOnDisk=65536
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 618 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/618/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 711591 lines...]
    [junit] 2011-03-26 12:22:04,765 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716)
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-8470746226147512846_1001 0 : Thread is interrupted.
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716)
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-8470746226147512846_1001 terminating
    [junit] 2011-03-26 12:22:04,767 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:393)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-26 12:22:04,768 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-26 12:22:04,868 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-26 12:22:04,868 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-26 12:22:04,869 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 42716
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-26 12:22:04,870 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-26 12:22:04,882 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-26 12:22:04,882 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-26 12:22:04,882 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 3 
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 51057
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 51057: exiting
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 51057
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.623 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 37 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 125757, r=ReplicaInPipeline, blk_-6894539633202504254_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 65536   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6894539633202504254   bytesAcked=0   bytesOnDisk=65536

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 125757, r=ReplicaInPipeline, blk_-6894539633202504254_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 65536
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6894539633202504254
  bytesAcked=0
  bytesOnDisk=65536
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 617 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/617/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 706775 lines...]
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-25 12:23:16,150 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:36251, storageID=DS-468696142-127.0.1.1-36251-1301055785538, infoPort=55782, ipcPort=36232):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-25 12:23:16,151 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 36232
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-25 12:23:16,152 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-25 12:23:16,152 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-25 12:23:16,152 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52971
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 52971: exiting
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 52971
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-25 12:23:16,255 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-25 12:23:16,255 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:41445, storageID=DS-1304287072-127.0.1.1-41445-1301055785353, infoPort=53886, ipcPort=52971):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-25 12:23:16,257 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,357 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:41445, storageID=DS-1304287072-127.0.1.1-41445-1301055785353, infoPort=53886, ipcPort=52971):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-25 12:23:16,358 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52971
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-25 12:23:16,359 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-25 12:23:16,359 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-25 12:23:16,460 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-25 12:23:16,460 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-25 12:23:16,461 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 36444
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 36444: exiting
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 36444
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.347 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 49 minutes 52 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n130s(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138f(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139e(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613d2(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e1(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fa(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 616 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/616/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 708649 lines...]
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.DataNode (BlockReceiver.java:run(914)) - PacketResponder blk_6538719823285349735_1001 0 : Thread is interrupted.
    [junit] 2011-03-24 12:22:35,344 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2011-03-24 12:22:35,344 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34307
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.DataNode (BlockReceiver.java:run(999)) - PacketResponder 0 for block blk_6538719823285349735_1001 terminating
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307)
    [junit] 2011-03-24 12:22:35,346 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:451)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:639)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-24 12:22:35,347 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-24 12:22:35,448 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34307
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-24 12:22:35,449 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-24 12:22:35,449 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-24 12:22:35,449 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-24 12:22:35,551 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-24 12:22:35,551 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-24 12:22:35,551 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-24 12:22:35,553 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44144
    [junit] 2011-03-24 12:22:35,553 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 44144: exiting
    [junit] 2011-03-24 12:22:35,554 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 44144
    [junit] 2011-03-24 12:22:35,554 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.558 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 29 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n130s(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138f(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139e(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613d2(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e1(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fa(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)
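
The Build #616 tail repeats the same pattern: once one TestDFSShell case leaves its MiniDFSCluster running, every later case that tries to re-format name1 fails with the lock error, while the two group comparisons (expected:<[reptiles]> but was:<[supergroup]>) suggest the tests' expected supergroup override is not taking effect, though the log alone does not show why. A hedged sketch of the teardown discipline that prevents the lock cascade follows: shut the cluster down in a finally block so the in_use.lock on the name directories is always released. It uses the MiniDFSCluster.Builder API visible in the traces above, with the test body elided; the class name and data-node count are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterTeardownSketch {
        public void runIsolated() throws Exception {
            Configuration conf = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(2)
                .build();
            try {
                // ... exercise the shell commands under test ...
            } finally {
                // Stops the NameNode and DataNodes and releases the lock on
                // the name directories; skipping this on a failure path is a
                // plausible cause of the "already locked" cascade above.
                cluster.shutdown();
            }
        }
    }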