Posted to mapreduce-dev@hadoop.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2015/10/13 23:27:08 UTC

Hadoop-Mapreduce-trunk - Build # 2470 - Still Failing

See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2470/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32990 lines...]

Tests in error: 
  TestRMContainerAllocator.testReportedAppProgress:970 » NullPointer
  TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode:1605 » NullPointer
  TestRMContainerAllocator.testBlackListedNodes:1318 » NullPointer
  TestRMContainerAllocator.testReducerRampdownDiagnostics:450 » NullPointer
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer

Tests run: 339, Failures: 0, Errors: 9, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.747 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:35 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.940 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.061 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:51 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:05 min
[INFO] Finished at: 2015-10-13T21:27:33+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9145
Updating HDFS-9235
Updating HADOOP-12449
Updating HADOOP-11515
Updating HADOOP-12474
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)
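
All nine TestRMContainerAllocator errors above fail at the same point: a NullPointerException inside AppSchedulingInfo.isRequestLabelChanged (AppSchedulingInfo.java:420), which, judging by its name and its call site in updateResourceRequests, compares the node-label expression of a stored resource request with an incoming one. As a rough, hypothetical illustration of the null-safe comparison such a check needs (this is not the actual Hadoop source; the class and method names are reused only to mirror the stack trace), consider:

    import java.util.Objects;

    // Hypothetical sketch only; it mirrors the shape of the failing comparison,
    // not the real org.apache.hadoop.yarn.server.resourcemanager.scheduler code.
    public class LabelChangeCheck {

        // True when the node-label expression differs between the stored request
        // and the updated one. Either label may legitimately be null (no label
        // requested), so the comparison must be null-safe; calling
        // lastLabel.equals(newLabel) directly would throw the same kind of NPE
        // whenever lastLabel is null.
        static boolean isRequestLabelChanged(String lastLabel, String newLabel) {
            return !Objects.equals(normalize(lastLabel), normalize(newLabel));
        }

        // Assumption for this sketch: treat null and "" both as "no label".
        private static String normalize(String label) {
            return (label == null || label.isEmpty()) ? "" : label;
        }

        public static void main(String[] args) {
            System.out.println(isRequestLabelChanged(null, null));   // false
            System.out.println(isRequestLabelChanged(null, "gpu"));  // true
            System.out.println(isRequestLabelChanged("gpu", "gpu")); // false
        }
    }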



Hadoop-Mapreduce-trunk - Build # 2585 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2585/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32283 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 43.738 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.833 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.396 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.511 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.177 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestMRApp.testUpdatedNodes:262 Expecting 2 more completion events for killed expected:<4> but was:<2>

Tests run: 340, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  8.151 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [04:26 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [01:14 min]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [ 14.016 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [17:05 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 23:14 min
[INFO] Finished at: 2015-11-09T08:05:06+00:00
[INFO] Final Memory: 51M/1402M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-5763
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes

Error Message:
Expecting 2 more completion events for killed expected:<4> but was:<2>

Stack Trace:
java.lang.AssertionError: Expecting 2 more completion events for killed expected:<4> but was:<2>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes(TestMRApp.java:262)



Hadoop-Mapreduce-trunk - Build # 2584 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2584/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32799 lines...]
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 351.658 sec - in org.apache.hadoop.mapreduce.v2.TestUberAM
Running org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 66.355 sec - in org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 61.977 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.TestValueIterReset
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.53 sec - in org.apache.hadoop.mapreduce.TestValueIterReset
Running org.apache.hadoop.mapreduce.TestMapReduceLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 149.239 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.481 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:56 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.856 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  7.107 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:01 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:59 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:05 h
[INFO] Finished at: 2015-11-08T23:09:35+00:00
[INFO] Final Memory: 37M/1058M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-11684
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)
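
The two TestMRTimelineEventHandling errors are timeouts rather than assertion failures: UtilsForTests.runJobSucceed gives the submitted job a fixed window (30 seconds here) and raises an IOException when the deadline passes. The generic pattern is a bounded polling loop like the hypothetical sketch below; the helper name and the BooleanSupplier probe are illustrative, not the actual Hadoop test utility.

    import java.io.IOException;
    import java.util.function.BooleanSupplier;

    // Hypothetical illustration of a bounded wait such as the one behind
    // "Job didn't finish in 30 seconds"; not the real UtilsForTests code.
    public class BoundedWait {

        // Polls isComplete until it returns true or timeoutMillis elapses,
        // then fails the same way the test utility does: with an IOException.
        static void awaitCompletion(BooleanSupplier isComplete, long timeoutMillis)
                throws IOException, InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (!isComplete.getAsBoolean()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new IOException(
                        "Job didn't finish in " + (timeoutMillis / 1000) + " seconds");
                }
                Thread.sleep(200); // poll interval; the real utility may differ
            }
        }

        public static void main(String[] args) throws Exception {
            long start = System.currentTimeMillis();
            // Completes after about one second, so this call returns normally.
            awaitCompletion(() -> System.currentTimeMillis() - start > 1000, 30_000);
            System.out.println("completed within the deadline");
        }
    }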



Hadoop-Mapreduce-trunk - Build # 2583 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2583/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32282 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 38.996 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.886 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.131 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.307 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.325 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestKill.testKillJob:84 Task state not correct expected:<KILLED> but was:<NEW>

Tests run: 340, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  7.708 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [03:56 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [01:02 min]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [ 12.904 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [15:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 21:10 min
[INFO] Finished at: 2015-11-08T13:07:55+00:00
[INFO] Final Memory: 47M/1253M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9057
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestKill.testKillJob

Error Message:
Task state not correct expected:<KILLED> but was:<NEW>

Stack Trace:
java.lang.AssertionError: Task state not correct expected:<KILLED> but was:<NEW>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.apache.hadoop.mapreduce.v2.app.TestKill.testKillJob(TestKill.java:84)



Hadoop-Mapreduce-trunk - Build # 2582 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2582/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 31995 lines...]
Running org.apache.hadoop.mapred.TestMapFileOutputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.837 sec - in org.apache.hadoop.mapred.TestMapFileOutputFormat
Running org.apache.hadoop.mapred.TestJobAclsManager
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.324 sec - in org.apache.hadoop.mapred.TestJobAclsManager
Running org.apache.hadoop.mapred.TestLineRecordReader
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.353 sec - in org.apache.hadoop.mapred.TestLineRecordReader
Running org.apache.hadoop.mapred.TestClock
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.144 sec - in org.apache.hadoop.mapred.TestClock
Running org.apache.hadoop.mapred.TestJobQueueClient
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.186 sec - in org.apache.hadoop.mapred.TestJobQueueClient

Results :

Failed tests: 
  TestJobEndNotifier.testNotificationTimeout:182 null

Tests run: 208, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  7.782 s]
[INFO] Apache Hadoop MapReduce Core ...................... FAILURE [04:32 min]
[INFO] Apache Hadoop MapReduce Common .................... SKIPPED
[INFO] Apache Hadoop MapReduce Shuffle ................... SKIPPED
[INFO] Apache Hadoop MapReduce App ....................... SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 04:44 min
[INFO] Finished at: 2015-11-08T11:44:20+00:00
[INFO] Final Memory: 39M/1292M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-core: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-core
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12553
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestJobEndNotifier.testNotificationTimeout

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: null
	at junit.framework.Assert.fail(Assert.java:55)
	at junit.framework.Assert.assertTrue(Assert.java:22)
	at junit.framework.Assert.assertTrue(Assert.java:31)
	at junit.framework.TestCase.assertTrue(TestCase.java:201)
	at org.apache.hadoop.mapred.TestJobEndNotifier.testNotificationTimeout(TestJobEndNotifier.java:182)



Hadoop-Mapreduce-trunk - Build # 2581 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2581/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32795 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.938 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.06 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.342 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.961 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.251 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:49 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.999 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.744 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:01 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:46 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-11-07T19:46:19+00:00
[INFO] Final Memory: 34M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9394
Updating HADOOP-12366
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2580 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2580/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32794 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.495 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.934 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.048 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.431 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.832 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.164 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:50 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.476 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.567 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:49 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:48 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:54 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:12 h
[INFO] Finished at: 2015-11-07T17:14:01+00:00
[INFO] Final Memory: 34M/715M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9398
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2579 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2579/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32279 lines...]
Running org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.266 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.003 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.223 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.108 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.5 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:629->assertJobState:1012 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 340, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.332 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:53 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.502 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.761 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:03 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:39 min
[INFO] Finished at: 2015-11-07T05:16:38+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9379
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:1012)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:629)



Hadoop-Mapreduce-trunk - Build # 2578 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2578/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32799 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.372 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.046 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.478 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.56 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.234 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:59 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.262 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.627 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:58 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:46 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:45 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-11-07T00:06:52+00:00
[INFO] Final Memory: 43M/1074M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9236
Updating HDFS-6481
Updating HDFS-9318
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)
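
For orientation: both errors above come from a test helper that evidently gives the submitted job at most 30 seconds to finish before throwing the IOException shown. The snippet below is a minimal, hypothetical sketch of that wait-with-timeout pattern; it is not the actual UtilsForTests.runJobSucceed source, and the class and method names are invented for illustration only.

    // Hypothetical illustration only -- not the actual UtilsForTests.runJobSucceed
    // source. It shows the wait-with-timeout pattern that produces the
    // "Job didn't finish in 30 seconds" IOException reported above.
    import java.io.IOException;
    import org.apache.hadoop.mapreduce.Job;

    public class JobTimeoutSketch {
      /** Poll the submitted job, giving up after maxWaitMillis (assumed value). */
      public static void waitForJob(Job job, long maxWaitMillis)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + maxWaitMillis;
        while (!job.isComplete()) {          // public org.apache.hadoop.mapreduce.Job API
          if (System.currentTimeMillis() > deadline) {
            throw new IOException(
                "Job didn't finish in " + (maxWaitMillis / 1000) + " seconds");
          }
          Thread.sleep(1000);                // poll once per second
        }
      }
    }

One plausible reading is simply that the mini-cluster job takes longer than 30 seconds to be scheduled and run on a busy build slave, which would be consistent with the same two tests failing in most of the builds below.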



Hadoop-Mapreduce-trunk - Build # 2577 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2577/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32797 lines...]
Running org.apache.hadoop.mapreduce.TestMRJobClient
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 183.962 sec - in org.apache.hadoop.mapreduce.TestMRJobClient
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.994 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.238 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.639 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.248 sec - in org.apache.hadoop.util.TestMRCJCRunJar

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.818 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.359 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.834 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:20 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:39 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:56 h
[INFO] Finished at: 2015-11-06T19:55:48+00:00
[INFO] Final Memory: 36M/678M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2576 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2576/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32400 lines...]
	at org.mockito.Mockito.mock(Mockito.java:921)
	at org.mockito.Mockito.mock(Mockito.java:816)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting(TestJobListCache.java:39)

Running org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 44.956 sec - in org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents
Running org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.314 sec - in org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Running org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.197 sec - in org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager

Results :

Tests in error: 
  TestJobListCache.testAddExisting:39 »  test timed out after 1000 milliseconds

Tests run: 198, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  5.714 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [03:12 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 49.180 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  9.856 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [13:52 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [07:50 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 26:02 min
[INFO] Finished at: 2015-11-06T09:04:33+00:00
[INFO] Final Memory: 45M/1236M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9384
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting

Error Message:
test timed out after 1000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 1000 milliseconds
	at java.lang.AbstractStringBuilder.<init>(AbstractStringBuilder.java:64)
	at java.lang.StringBuilder.<init>(StringBuilder.java:85)
	at java.io.UnixFileSystem.resolve(UnixFileSystem.java:108)
	at java.io.File.<init>(File.java:367)
	at sun.misc.URLClassPath$FileLoader.getResource(URLClassPath.java:1077)
	at sun.misc.URLClassPath.getResource(URLClassPath.java:199)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:358)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.mockito.cglib.proxy.Enhancer.<clinit>(Enhancer.java:63)
	at org.mockito.internal.creation.jmock.ClassImposterizer.createProxyClass(ClassImposterizer.java:68)
	at org.mockito.internal.creation.jmock.ClassImposterizer.imposterise(ClassImposterizer.java:50)
	at org.mockito.internal.util.MockUtil.createMock(MockUtil.java:54)
	at org.mockito.internal.MockitoCore.mock(MockitoCore.java:45)
	at org.mockito.Mockito.mock(Mockito.java:921)
	at org.mockito.Mockito.mock(Mockito.java:816)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting(TestJobListCache.java:39)
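
The stack trace above shows the 1000 ms JUnit timeout expiring while Mockito is still generating its very first proxy class (the cglib Enhancer and class-loader frames), i.e. before the test body proper has done any work. A minimal, hypothetical reproduction of that situation looks like the sketch below; the mocked type and test body are invented stand-ins, not the real TestJobListCache code.

    // Hypothetical illustration: a JUnit 4 test whose fixed 1-second timeout can be
    // consumed by one-time cglib class generation inside the first Mockito.mock()
    // call, as in the frames shown above.
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import org.junit.Test;

    public class TimeoutDuringMockCreationSketch {
      /** Stand-in for the class the real test mocks. */
      static class CachedJob {
        String getId() { return "job_0"; }
      }

      @Test(timeout = 1000)                      // same 1000 ms budget as the failure above
      public void firstMockInTheJvm() {
        CachedJob job = mock(CachedJob.class);   // cold JVM: the proxy class is generated here
        when(job.getId()).thenReturn("job_1");
        // ...cache assertions would follow; omitted in this sketch...
      }
    }

Raising the timeout, or creating the first mock outside the timed body, would keep one-time class-generation cost out of the 1-second budget; whether that is the right fix for TestJobListCache is a call for the test owners.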



Hadoop-Mapreduce-trunk - Build # 2575 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2575/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32796 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.441 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.608 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.512 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.615 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.809 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.926 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.870 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:23 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:57 h
[INFO] Finished at: 2015-11-06T07:34:08+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-11684
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2574 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2574/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32792 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.376 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.394 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.554 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.271 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.980 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.027 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:12 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:35 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:47 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:04 h
[INFO] Finished at: 2015-11-06T04:24:00+00:00
[INFO] Final Memory: 36M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-3914
Updating MAPREDUCE-1125
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2573 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2573/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32799 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.899 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.705 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.45 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.802 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.411 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:50 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.784 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.567 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:55 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:49 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:04 h
[INFO] Finished at: 2015-11-06T01:34:16+00:00
[INFO] Final Memory: 34M/698M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9282
Updating HDFS-9377
Updating HADOOP-11887
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2572 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2572/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32788 lines...]
Running org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 97.413 sec - in org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner
Running org.apache.hadoop.mapreduce.v2.TestUberAM
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 357.604 sec - in org.apache.hadoop.mapreduce.v2.TestUberAM
Running org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 65.878 sec - in org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 60.634 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.TestValueIterReset
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.452 sec - in org.apache.hadoop.mapreduce.TestValueIterReset
Running org.apache.hadoop.mapreduce.TestMapReduceLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 144.85 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 521, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.300 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:00 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 35.204 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  7.715 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:02 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:01 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:59 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:18 h
[INFO] Finished at: 2015-11-05T21:25:18+00:00
[INFO] Final Memory: 46M/1095M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9372
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2571 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2571/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32864 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.852 sec - in org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Running org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.065 sec - in org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.395 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.512 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...
  TestSpecialCharactersInOutputPath.testJobWithDFS:112 » YarnRuntime java.io.IOE...
  TestNetworkedJob.testNetworkedJob:190 » YarnRuntime java.lang.reflect.Invocati...

Tests run: 511, Failures: 0, Errors: 4, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.875 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.446 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.950 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:29 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-11-05T18:00:51+00:00
[INFO] Final Memory: 35M/980M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter2449961279491604788.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire2182081451013091918tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2566350981806119243194tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-4937
Updating HDFS-9378
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
java.lang.reflect.InvocationTargetException

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.reflect.InvocationTargetException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.proto.YarnServiceProtos$GetClusterMetricsRequestProto.newBuilder(YarnServiceProtos.java:16068)
	at org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl.<init>(GetClusterMetricsRequestPBImpl.java:37)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl.newRecordInstance(RecordFactoryPBImpl.java:70)
	at org.apache.hadoop.yarn.util.Records.newRecord(Records.java:36)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getYarnClusterMetrics(YarnClientImpl.java:514)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getClusterMetrics(ResourceMgrDelegate.java:154)
	at org.apache.hadoop.mapred.YARNRunner.getClusterMetrics(YARNRunner.java:189)
	at org.apache.hadoop.mapreduce.Cluster.getClusterStatus(Cluster.java:247)
	at org.apache.hadoop.mapred.JobClient$4.run(JobClient.java:798)
	at org.apache.hadoop.mapred.JobClient$4.run(JobClient.java:796)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getClusterStatus(JobClient.java:796)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:190)
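
The TestNetworkedJob failure above surfaces inside JobClient.getClusterStatus(), which delegates through YARNRunner and YarnClientImpl to fetch cluster metrics; the wrapped InvocationTargetException is thrown while the protobuf request record is being constructed reflectively. For orientation, here is a minimal, hypothetical sketch of that client-side call path using the public YarnClient API; the configuration and printed field are assumptions, not taken from the test.

    // Hypothetical sketch of the client path exercised by the trace above:
    // cluster metrics are fetched through YarnClient, which builds protobuf
    // request records reflectively before calling the ResourceManager.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class ClusterMetricsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();   // assumes a reachable ResourceManager
        YarnClient client = YarnClient.createYarnClient();
        client.init(conf);
        client.start();
        try {
          // Equivalent of the getYarnClusterMetrics() call seen in the trace above.
          YarnClusterMetrics metrics = client.getYarnClusterMetrics();
          System.out.println("Active NodeManagers: " + metrics.getNumNodeManagers());
        } finally {
          client.stop();
        }
      }
    }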


FAILED:  org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS

Error Message:
java.io.IOException: ResourceManager failed to start. Final state is STOPPED

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.io.IOException: ResourceManager failed to start. Final state is STOPPED
	at org.apache.hadoop.yarn.server.MiniYARNCluster.startResourceManager(MiniYARNCluster.java:331)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$500(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceStart(MiniYARNCluster.java:457)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceStart(MiniMRYarnCluster.java:191)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS(TestSpecialCharactersInOutputPath.java:112)
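
The TestSpecialCharactersInOutputPath error above is raised from MiniYARNCluster.serviceStart() when the embedded ResourceManager ends up STOPPED instead of started. For reference, a minimal, hypothetical sketch of the same mini-cluster lifecycle follows; the cluster name and sizes are arbitrary, and the real test reaches this code through the MiniMRCluster/MiniMRYarnCluster wrappers shown in the trace.

    // Hypothetical sketch of the MiniYARNCluster lifecycle behind the trace above;
    // the embedded ResourceManager is brought up inside start(), which is where the
    // "ResourceManager failed to start" exception is reported.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.MiniYARNCluster;

    public class MiniYarnClusterSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Arbitrary example sizes: 1 NodeManager, 1 local dir, 1 log dir.
        MiniYARNCluster cluster = new MiniYARNCluster("sketch", 1, 1, 1);
        cluster.init(conf);   // wires up the embedded RM and NM services
        cluster.start();      // serviceStart(); the failure above happens at this stage
        try {
          // A test would run jobs against the started cluster's configuration here.
          System.out.println("RM address: "
              + cluster.getConfig().get(YarnConfiguration.RM_ADDRESS));
        } finally {
          cluster.stop();
        }
      }
    }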


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2570 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2570/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32788 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.387 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.999 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.061 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.585 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.029 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.296 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.961 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:20 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:39 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:00 h
[INFO] Finished at: 2015-11-05T13:00:31+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9360
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2569 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2569/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32803 lines...]
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.847 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.882 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.873 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.611 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.032 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:15 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:34 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:45 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:02 h
[INFO] Finished at: 2015-11-04T22:38:22+00:00
[INFO] Final Memory: 36M/752M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9007
Updating HDFS-9363
Updating HDFS-8855
Updating HDFS-9357
Updating HADOOP-12540
Updating HDFS-9331
Updating HADOOP-12296
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2568 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2568/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32791 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.36 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.475 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.613 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.611 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.376 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.306 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.470 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.138 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:26 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:37 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:02 h
[INFO] Finished at: 2015-11-04T19:01:29+00:00
[INFO] Final Memory: 36M/694M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2567 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2567/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32395 lines...]
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.124 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.917 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.645 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.094 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Running org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.042 sec - in org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

Results :

Tests in error: 
  TestJobListCache.testAddExisting:38 »  test timed out after 1000 milliseconds

Tests run: 198, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  8.602 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [05:06 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [01:18 min]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [ 14.148 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [17:44 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [10:14 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 34:51 min
[INFO] Finished at: 2015-11-04T12:13:03+00:00
[INFO] Final Memory: 42M/806M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-10787
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting

Error Message:
test timed out after 1000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 1000 milliseconds
	at org.apache.xerces.util.XMLChar.isContent(Unknown Source)
	at org.apache.xerces.impl.XMLEntityScanner.scanContent(Unknown Source)
	at org.apache.xerces.impl.XMLDocumentFragmentScannerImpl.scanContent(Unknown Source)
	at org.apache.xerces.impl.XMLDocumentFragmentScannerImpl$FragmentContentDispatcher.dispatch(Unknown Source)
	at org.apache.xerces.impl.XMLDocumentFragmentScannerImpl.scanDocument(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XMLParser.parse(Unknown Source)
	at org.apache.xerces.parsers.DOMParser.parse(Unknown Source)
	at org.apache.xerces.jaxp.DocumentBuilderImpl.parse(Unknown Source)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:150)
	at org.apache.hadoop.conf.Configuration.parse(Configuration.java:2555)
	at org.apache.hadoop.conf.Configuration.parse(Configuration.java:2543)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:2614)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:2567)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:2471)
	at org.apache.hadoop.conf.Configuration.get(Configuration.java:1296)
	at org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider.getRecordFactory(RecordFactoryProvider.java:49)
	at org.apache.hadoop.yarn.util.Records.<clinit>(Records.java:32)
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils.newJobId(MRBuilderUtils.java:46)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting(TestJobListCache.java:38)
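
The "test timed out after 1000 milliseconds" failure is JUnit 4's standard message for an exceeded @Test(timeout = ...) budget, and the stack above shows the budget expiring while Hadoop's Configuration was still parsing XML during the static initialization of org.apache.hadoop.yarn.util.Records, i.e. one-time setup cost rather than a hang in the cache logic itself. A minimal sketch of the annotation pattern, with a hypothetical test class rather than the real TestJobListCache:

  import static org.junit.Assert.assertTrue;

  import org.junit.Test;

  public class TimeoutPatternSketch {

    // JUnit 4 enforces the per-test budget and fails with
    // "test timed out after 1000 milliseconds" when it is exceeded.
    @Test(timeout = 1000)
    public void finishesQuickly() throws Exception {
      Thread.sleep(100);  // comfortably inside the budget; sleep(2000) would trip it
      assertTrue(true);
    }
  }

Raising the budget, or touching Records and Configuration once before the timed body runs, are the usual remedies for this kind of flakiness.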



Hadoop-Mapreduce-trunk - Build # 2566 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2566/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32793 lines...]
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 333.673 sec - in org.apache.hadoop.mapreduce.v2.TestUberAM
Running org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 68.081 sec - in org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 60.576 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.TestValueIterReset
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.608 sec - in org.apache.hadoop.mapreduce.TestValueIterReset
Running org.apache.hadoop.mapreduce.TestMapReduceLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 147.303 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.782 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:03 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 34.159 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  7.108 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:16 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:04 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:49 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:09 h
[INFO] Finished at: 2015-11-04T10:20:59+00:00
[INFO] Final Memory: 35M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12544
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2565 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2565/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32377 lines...]
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.182 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.988 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.454 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.439 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Running org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.931 sec - in org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

Results :

Tests in error: 
  TestJobListCache.testAddExisting:39 »  test timed out after 1000 milliseconds

Tests run: 198, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  6.614 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [04:01 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [01:04 min]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [ 12.554 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [17:01 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [09:20 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 31:50 min
[INFO] Finished at: 2015-11-04T05:05:19+00:00
[INFO] Final Memory: 49M/1180M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9351
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting

Error Message:
test timed out after 1000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 1000 milliseconds
	at org.mockito.internal.MockitoCore.<init>(MockitoCore.java:40)
	at org.mockito.Mockito.<clinit>(Mockito.java:674)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobListCache.testAddExisting(TestJobListCache.java:39)



Hadoop-Mapreduce-trunk - Build # 2564 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2564/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32035 lines...]
Running org.apache.hadoop.mapred.TestMapFileOutputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.332 sec - in org.apache.hadoop.mapred.TestMapFileOutputFormat
Running org.apache.hadoop.mapred.TestJobAclsManager
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.57 sec - in org.apache.hadoop.mapred.TestJobAclsManager
Running org.apache.hadoop.mapred.TestLineRecordReader
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.284 sec - in org.apache.hadoop.mapred.TestLineRecordReader
Running org.apache.hadoop.mapred.TestClock
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.208 sec - in org.apache.hadoop.mapred.TestClock
Running org.apache.hadoop.mapred.TestJobQueueClient
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.23 sec - in org.apache.hadoop.mapred.TestJobQueueClient

Results :

Failed tests: 
  TestJobEndNotifier.testNotificationTimeout:182 null

Tests run: 208, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [ 12.018 s]
[INFO] Apache Hadoop MapReduce Core ...................... FAILURE [05:08 min]
[INFO] Apache Hadoop MapReduce Common .................... SKIPPED
[INFO] Apache Hadoop MapReduce Shuffle ................... SKIPPED
[INFO] Apache Hadoop MapReduce App ....................... SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 05:24 min
[INFO] Finished at: 2015-11-04T00:32:29+00:00
[INFO] Final Memory: 44M/1292M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-core: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-core
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9289
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestJobEndNotifier.testNotificationTimeout

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: null
	at junit.framework.Assert.fail(Assert.java:55)
	at junit.framework.Assert.assertTrue(Assert.java:22)
	at junit.framework.Assert.assertTrue(Assert.java:31)
	at junit.framework.TestCase.assertTrue(TestCase.java:201)
	at org.apache.hadoop.mapred.TestJobEndNotifier.testNotificationTimeout(TestJobEndNotifier.java:182)
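
The "Error Message: null" above is a consequence of the single-argument assertTrue: it supplies no failure message, so the AssertionFailedError carries nothing and the report has only null to print. A small sketch of the difference, using hypothetical assertions rather than the real TestJobEndNotifier check:

  import static org.junit.Assert.assertTrue;

  import org.junit.Test;

  public class AssertMessageSketch {

    @Test
    public void withoutMessage() {
      assertTrue(1 + 1 == 2);  // a failure here is reported only as "null"
    }

    @Test
    public void withMessage() {
      // a failure here carries a message the CI email can actually show
      assertTrue("notification should have finished before the timeout", 1 + 1 == 2);
    }
  }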



Hadoop-Mapreduce-trunk - Build # 2563 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2563/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32514 lines...]
  TestJobEndNotifier.testNotificationOnLastRetryNormalShutdown:204 NoClassDefFound
  TestJobEndNotifier.testAbsentNotificationOnNotLastRetryUnregistrationFailure:227 NoClassDefFound
  TestJobEndNotifier.testNotificationOnLastRetryUnregistrationFailure:254 NoClassDefFound
  TestJobHistoryEventHandler.testFirstFlushOnCompletionEvent:109 » ExceptionInInitializer
  TestJobHistoryEventHandler.testProcessDoneFilesNotLastAMRetry:330 » NoClassDefFound
  TestJobHistoryEventHandler.testTimelineEventHandling:489 » NoClassDefFound Cou...
  TestJobHistoryEventHandler.testProcessDoneFilesOnLastAMRetry:284 » NoClassDefFound
  TestJobHistoryEventHandler.testDefaultFsIsUsedForHistory:387 » NoClassDefFound
  TestJobHistoryEventHandler.testSigTermedFunctionality:794 » NoClassDefFound or...
  TestJobHistoryEventHandler.testMaxUnflushedCompletionEvents:152 » NoClassDefFound
  TestJobHistoryEventHandler.testAMStartedEvent:447 » NoClassDefFound Could not ...
  TestJobHistoryEventHandler.testUnflushedTimer:197 » NoClassDefFound Could not ...
  TestJobHistoryEventHandler.testBatchedFlushJobEndMultiplier:240 » NoClassDefFound

Tests run: 340, Failures: 0, Errors: 18, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.346 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.582 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.471 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:39 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:06 min
[INFO] Finished at: 2015-11-03T20:45:01+00:00
[INFO] Final Memory: 52M/1249M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9362
Updating HDFS-9354
Updating YARN-4326
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
18 tests failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testFirstFlushOnCompletionEvent

Error Message:
null

Stack Trace:
java.lang.ExceptionInInitializerError: null
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.Class.forName0(Native Method)
	at java.lang.Class.forName(Class.java:190)
	at org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider.getFactoryClassInstance(RecordFactoryProvider.java:57)
	at org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider.getRecordFactory(RecordFactoryProvider.java:52)
	at org.apache.hadoop.yarn.util.Records.<clinit>(Records.java:32)
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testFirstFlushOnCompletionEvent(TestJobHistoryEventHandler.java:109)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testProcessDoneFilesNotLastAMRetry

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testProcessDoneFilesNotLastAMRetry(TestJobHistoryEventHandler.java:330)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:489)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testProcessDoneFilesOnLastAMRetry

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testProcessDoneFilesOnLastAMRetry(TestJobHistoryEventHandler.java:284)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testDefaultFsIsUsedForHistory

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testDefaultFsIsUsedForHistory(TestJobHistoryEventHandler.java:387)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testSigTermedFunctionality

Error Message:
org/apache/hadoop/yarn/util/Clock

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Clock
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.Class.getDeclaredMethods0(Native Method)
	at java.lang.Class.privateGetDeclaredMethods(Class.java:2531)
	at java.lang.Class.getDeclaredMethods(Class.java:1855)
	at org.mockito.cglib.core.ReflectUtils.addAllMethods(ReflectUtils.java:349)
	at org.mockito.cglib.proxy.Enhancer.getMethods(Enhancer.java:427)
	at org.mockito.cglib.proxy.Enhancer.generateClass(Enhancer.java:457)
	at org.mockito.cglib.core.DefaultGeneratorStrategy.generate(DefaultGeneratorStrategy.java:25)
	at org.mockito.cglib.core.AbstractClassGenerator.create(AbstractClassGenerator.java:217)
	at org.mockito.cglib.proxy.Enhancer.createHelper(Enhancer.java:378)
	at org.mockito.cglib.proxy.Enhancer.createClass(Enhancer.java:318)
	at org.mockito.internal.creation.jmock.ClassImposterizer.createProxyClass(ClassImposterizer.java:93)
	at org.mockito.internal.creation.jmock.ClassImposterizer.imposterise(ClassImposterizer.java:50)
	at org.mockito.internal.util.MockUtil.createMock(MockUtil.java:54)
	at org.mockito.internal.MockitoCore.mock(MockitoCore.java:45)
	at org.mockito.Mockito.mock(Mockito.java:921)
	at org.mockito.Mockito.mock(Mockito.java:816)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testSigTermedFunctionality(TestJobHistoryEventHandler.java:794)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testMaxUnflushedCompletionEvents

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testMaxUnflushedCompletionEvents(TestJobHistoryEventHandler.java:152)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testAMStartedEvent

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testAMStartedEvent(TestJobHistoryEventHandler.java:447)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testUnflushedTimer

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testUnflushedTimer(TestJobHistoryEventHandler.java:197)


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testBatchedFlushJobEndMultiplier

Error Message:
Could not initialize class org.apache.hadoop.yarn.util.Records

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.yarn.util.Records
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:763)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler$TestParams.<init>(TestJobHistoryEventHandler.java:773)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testBatchedFlushJobEndMultiplier(TestJobHistoryEventHandler.java:240)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotifyRetries

Error Message:
org/apache/hadoop/yarn/api/records/Priority

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/api/records/Priority
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.Class.getDeclaredMethods0(Native Method)
	at java.lang.Class.privateGetDeclaredMethods(Class.java:2531)
	at java.lang.Class.getDeclaredMethods(Class.java:1855)
	at org.mockito.cglib.core.ReflectUtils.addAllMethods(ReflectUtils.java:349)
	at org.mockito.cglib.proxy.Enhancer.getMethods(Enhancer.java:427)
	at org.mockito.cglib.proxy.Enhancer.generateClass(Enhancer.java:457)
	at org.mockito.cglib.core.DefaultGeneratorStrategy.generate(DefaultGeneratorStrategy.java:25)
	at org.mockito.cglib.core.AbstractClassGenerator.create(AbstractClassGenerator.java:217)
	at org.mockito.cglib.proxy.Enhancer.createHelper(Enhancer.java:378)
	at org.mockito.cglib.proxy.Enhancer.createClass(Enhancer.java:318)
	at org.mockito.internal.creation.jmock.ClassImposterizer.createProxyClass(ClassImposterizer.java:93)
	at org.mockito.internal.creation.jmock.ClassImposterizer.imposterise(ClassImposterizer.java:50)
	at org.mockito.internal.util.MockUtil.createMock(MockUtil.java:54)
	at org.mockito.internal.MockitoCore.mock(MockitoCore.java:45)
	at org.mockito.Mockito.mock(Mockito.java:921)
	at org.mockito.Mockito.mock(Mockito.java:816)
	at org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotifyRetries(TestJobEndNotifier.java:171)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotificationOnLastRetryNormalShutdown

Error Message:
org/apache/hadoop/yarn/exceptions/YarnRuntimeException

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnRuntimeException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotificationOnLastRetryNormalShutdown(TestJobEndNotifier.java:204)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testAbsentNotificationOnNotLastRetryUnregistrationFailure

Error Message:
org/apache/hadoop/yarn/exceptions/YarnRuntimeException

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnRuntimeException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testAbsentNotificationOnNotLastRetryUnregistrationFailure(TestJobEndNotifier.java:227)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotificationOnLastRetryUnregistrationFailure

Error Message:
org/apache/hadoop/yarn/exceptions/YarnRuntimeException

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnRuntimeException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier.testNotificationOnLastRetryUnregistrationFailure(TestJobEndNotifier.java:254)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testAMRMTokenUpdate

Error Message:
org/apache/hadoop/io/retry/FailoverProxyProvider

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/retry/FailoverProxyProvider
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.updateAMRMToken(RMContainerAllocator.java:870)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:769)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$7.run(TestRMContainerAllocator.java:2706)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$7.run(TestRMContainerAllocator.java:2692)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testAMRMTokenUpdate(TestRMContainerAllocator.java:2691)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:223)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$3.<init>(TestRMContainerAllocator.java:1074)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1073)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUnregistrationOnlyIfRegistered

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:223)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$6.<init>(TestRMContainerAllocator.java:2392)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUnregistrationOnlyIfRegistered(TestRMContainerAllocator.java:2390)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceStop(ResourceManager.java:1100)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2599)
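
Most of the 18 failures above appear to share one root cause. The first test to touch org.apache.hadoop.yarn.util.Records failed in that class's static initializer (the ExceptionInInitializerError in testFirstFlushOnCompletionEvent, raised while the record factory class was being looked up), and the JVM then marks the class as erroneous, so every later use in the same JVM fails with "Could not initialize class org.apache.hadoop.yarn.util.Records". The additional NoClassDefFoundErrors for yarn and hadoop-common classes in the same run suggest a broken test classpath rather than many independent bugs. A self-contained sketch of that JVM behaviour, with hypothetical class names:

  public class InitFailureSketch {

    // Stand-in for a class whose static initialization fails, as Records did above.
    static class Flaky {
      static final int VALUE = computeValue();

      private static int computeValue() {
        // Simulates the record-factory lookup failing during class initialization.
        throw new RuntimeException("static init failed");
      }

      static void use() {
        System.out.println(VALUE);
      }
    }

    public static void main(String[] args) {
      try {
        Flaky.use();           // first touch: java.lang.ExceptionInInitializerError
      } catch (Throwable t) {
        System.out.println("first use:  " + t);
      }
      try {
        Flaky.use();           // later touches in the same JVM: NoClassDefFoundError
      } catch (Throwable t) {  // "Could not initialize class ...Flaky"
        System.out.println("second use: " + t);
      }
    }
  }

Running the sketch prints an ExceptionInInitializerError for the first call and the "Could not initialize class" NoClassDefFoundError for the second, which is the same progression visible in the failure list above.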



Hadoop-Mapreduce-trunk - Build # 2562 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2562/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32797 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.007 sec - in org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Running org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.96 sec - in org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.993 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.571 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.244 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:50 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.598 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.325 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:44 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:47 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:04 h
[INFO] Finished at: 2015-11-03T10:28:59+00:00
[INFO] Final Memory: 35M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12437
Updating HADOOP-12541
Updating HADOOP-12542
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2561 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2561/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32793 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.639 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.624 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.199 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.597 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.073 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:20 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:35 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-11-03T06:54:41+00:00
[INFO] Final Memory: 34M/713M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9313
Updating HDFS-9308
Updating HDFS-9275
Updating HDFS-9312
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2560 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2560/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32795 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.386 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.574 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.417 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.478 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.274 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.917 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.801 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.997 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:15 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:41 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:58 h
[INFO] Finished at: 2015-11-03T03:05:18+00:00
[INFO] Final Memory: 34M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9339
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2559 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2559/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32850 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 139.979 sec - in org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 59.314 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.v2.TestMROldApiJobs
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 72.761 sec - in org.apache.hadoop.mapreduce.v2.TestMROldApiJobs
Running org.apache.hadoop.mapreduce.v2.TestUberAM

Results :

Tests in error: 
  TestMiniMRWithDFSWithDistinctUsers.tearDown:103 » NoClassDefFound org/apache/h...
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » YarnRuntime could not cleanup te...
  TestMiniMRWithDFSWithDistinctUsers.tearDown:104 » NoClassDefFound org/apache/h...
  TestMRTimelineEventHandling.testMRTimelineEventHandling:131 » NoClassDefFound ...
  TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster:51 » Runtime
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:146 » Runtime

Tests run: 329, Failures: 0, Errors: 6, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.752 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.759 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.940 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:14 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:37 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:07 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:24 h
[INFO] Finished at: 2015-11-02T23:28:18+00:00
[INFO] Final Memory: 46M/797M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter7991198813207507958.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire6750277308933156809tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_1462208485522876016514tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12508
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster

Error Message:
java.util.zip.ZipException: invalid stored block lengths

Stack Trace:
java.lang.RuntimeException: java.util.zip.ZipException: invalid stored block lengths
	at java.util.zip.InflaterInputStream.read(InflaterInputStream.java:164)
	at java.util.zip.InflaterInputStream.read(InflaterInputStream.java:122)
	at java.io.FilterInputStream.read(FilterInputStream.java:83)
	at org.apache.xerces.impl.XMLEntityManager$RewindableInputStream.read(Unknown Source)
	at org.apache.xerces.impl.XMLEntityManager.setupCurrentEntity(Unknown Source)
	at org.apache.xerces.impl.XMLVersionDetector.determineDocVersion(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XMLParser.parse(Unknown Source)
	at org.apache.xerces.parsers.DOMParser.parse(Unknown Source)
	at org.apache.xerces.jaxp.DocumentBuilderImpl.parse(Unknown Source)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:121)
	at org.apache.hadoop.conf.Configuration.parse(Configuration.java:2555)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:2628)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:2577)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:2471)
	at org.apache.hadoop.conf.Configuration.get(Configuration.java:1045)
	at org.apache.hadoop.conf.Configuration.getTrimmed(Configuration.java:1095)
	at org.apache.hadoop.conf.Configuration.getInt(Configuration.java:1315)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.validateConf(CapacityScheduler.java:171)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.initScheduler(CapacityScheduler.java:294)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.serviceInit(CapacityScheduler.java:334)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:567)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:979)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:254)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:299)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:451)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:273)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster(TestMRTimelineEventHandling.java:51)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testMultipleSpills

Error Message:
org/apache/hadoop/util/ShutdownThreadsHelper

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ShutdownThreadsHelper
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.serviceStop(HistoryFileManager.java:635)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceStop(JobHistory.java:171)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceStop(JobHistoryServer.java:211)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStop(MiniMRYarnCluster.java:257)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.tearDown(TestMiniMRWithDFSWithDistinctUsers.java:103)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.<init>(MiniYARNCluster.java:148)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:79)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:75)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:73)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator
	at org.apache.hadoop.util.IntrusiveCollection.iterator(IntrusiveCollection.java:213)
	at org.apache.hadoop.util.IntrusiveCollection.clear(IntrusiveCollection.java:368)
	at org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.clearPendingCachingCommands(DatanodeManager.java:1564)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.stopActiveServices(FSNamesystem.java:1227)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.close(FSNamesystem.java:1587)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stopCommonServices(NameNode.java:773)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stop(NameNode.java:943)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1911)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1880)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1873)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.tearDown(TestMiniMRWithDFSWithDistinctUsers.java:104)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent$Reason

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent$Reason
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.cleanUpApplicationsOnNMShutDown(ContainerManagerImpl.java:569)
	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.serviceStop(ContainerManagerImpl.java:536)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceStop(NodeManager.java:377)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStop(MiniYARNCluster.java:591)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:131)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
java.util.zip.ZipException: invalid stored block lengths

Stack Trace:
java.lang.RuntimeException: java.util.zip.ZipException: invalid stored block lengths
	at java.util.zip.InflaterInputStream.read(InflaterInputStream.java:164)
	at java.util.zip.InflaterInputStream.read(InflaterInputStream.java:122)
	at java.io.FilterInputStream.read(FilterInputStream.java:83)
	at org.apache.xerces.impl.XMLEntityManager$RewindableInputStream.read(Unknown Source)
	at org.apache.xerces.impl.XMLEntityManager.setupCurrentEntity(Unknown Source)
	at org.apache.xerces.impl.XMLVersionDetector.determineDocVersion(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XML11Configuration.parse(Unknown Source)
	at org.apache.xerces.parsers.XMLParser.parse(Unknown Source)
	at org.apache.xerces.parsers.DOMParser.parse(Unknown Source)
	at org.apache.xerces.jaxp.DocumentBuilderImpl.parse(Unknown Source)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:121)
	at org.apache.hadoop.conf.Configuration.parse(Configuration.java:2555)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:2628)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:2577)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:2471)
	at org.apache.hadoop.conf.Configuration.get(Configuration.java:1045)
	at org.apache.hadoop.conf.Configuration.getTrimmed(Configuration.java:1095)
	at org.apache.hadoop.conf.Configuration.getInt(Configuration.java:1315)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.validateConf(CapacityScheduler.java:171)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.initScheduler(CapacityScheduler.java:294)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler.serviceInit(CapacityScheduler.java:334)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:567)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:979)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:254)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:299)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:451)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:273)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:146)



Hadoop-Mapreduce-trunk - Build # 2558 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2558/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32791 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.371 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.459 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.611 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 519, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.011 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.482 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.909 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:22 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:40 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:57 h
[INFO] Finished at: 2015-11-02T20:47:43+00:00
[INFO] Final Memory: 34M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter5672922163703121592.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire1843709424507362tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_3053443288227183453996tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9329
Updating HADOOP-12533
Updating MAPREDUCE-6525
Updating HDFS-8777
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2557 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2557/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33056 lines...]
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHsWebServicesJobsQuery.<init>:109->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestHistoryFileManager.testCreateDirsWithoutFileSystem:109 » NoClassDefFound o...
  TestHistoryFileManager.testCreateDirsWithFileSystemBecomingAvailBeforeTimeout:188 NoClassDefFound
  TestHistoryFileManager.testHistoryFileInfoSummaryFileNotExist:235 » NoClassDefFound
  TestHistoryFileManager.testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout » 

Tests run: 198, Failures: 0, Errors: 33, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.946 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.075 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.941 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:21 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [04:39 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 16:21 min
[INFO] Finished at: 2015-11-02T16:17:27+00:00
[INFO] Final Memory: 46M/805M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
33 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithoutFileSystem

Error Message:
org/apache/hadoop/yarn/api/ApplicationConstants$Environment

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/api/ApplicationConstants$Environment
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.conf.YarnConfiguration.<clinit>(YarnConfiguration.java:649)
	at org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithoutFileSystem(TestHistoryFileManager.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithFileSystemBecomingAvailBeforeTimeout

Error Message:
org/apache/hadoop/yarn/util/SystemClock

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/SystemClock
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithFileSystemBecomingAvailBeforeTimeout(TestHistoryFileManager.java:188)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testHistoryFileInfoSummaryFileNotExist

Error Message:
org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.TypeConverter.<clinit>(TypeConverter.java:63)
	at org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testHistoryFileInfoSummaryFileNotExist(TestHistoryFileManager.java:235)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout

Error Message:
Unexpected exception, expected<org.apache.hadoop.yarn.exceptions.YarnRuntimeException> but was<java.lang.NoClassDefFoundError>

Stack Trace:
java.lang.Exception: Unexpected exception, expected<org.apache.hadoop.yarn.exceptions.YarnRuntimeException> but was<java.lang.NoClassDefFoundError>
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.hs.TestHistoryFileManager.testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout(TestHistoryFileManager.java:197)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testEventsFlushOnStop

Error Message:
org/apache/hadoop/util/PlatformName

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/PlatformName
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.security.UserGroupInformation.getOSLoginModuleName(UserGroupInformation.java:378)
	at org.apache.hadoop.security.UserGroupInformation.<clinit>(UserGroupInformation.java:423)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:445)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:150)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithSpecialHistoryHandler.<init>(TestJobHistoryEvents.java:244)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testEventsFlushOnStop(TestJobHistoryEvents.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testJobHistoryEventHandlerIsFirstServiceToStop

Error Message:
Could not initialize class org.apache.hadoop.security.UserGroupInformation

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.security.UserGroupInformation
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:445)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:150)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithSpecialHistoryHandler.<init>(TestJobHistoryEvents.java:244)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testJobHistoryEventHandlerIsFirstServiceToStop(TestJobHistoryEvents.java:147)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testHistoryEvents

Error Message:
Could not initialize class org.apache.hadoop.security.UserGroupInformation

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.security.UserGroupInformation
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:445)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:150)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithHistory.<init>(TestJobHistoryEvents.java:219)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testHistoryEvents(TestJobHistoryEvents.java:54)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testAssignedQueue

Error Message:
Could not initialize class org.apache.hadoop.security.UserGroupInformation

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.security.UserGroupInformation
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:445)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:155)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithHistory.<init>(TestJobHistoryEvents.java:224)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testAssignedQueue(TestJobHistoryEvents.java:162)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeEndNegative

Error Message:
org/apache/hadoop/security/token/SecretManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/security/token/SecretManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.<clinit>(JobImpl.java:245)
	at org.apache.hadoop.mapreduce.v2.app.MockJobs.getCounters(MockJobs.java:443)
	at org.apache.hadoop.mapreduce.v2.app.MockJobs.newJob(MockJobs.java:492)
	at org.apache.hadoop.mapreduce.v2.app.MockJobs.newJob(MockJobs.java:482)
	at org.apache.hadoop.mapreduce.v2.app.MockJobs.newJob(MockJobs.java:478)
	at org.apache.hadoop.mapreduce.v2.app.MockJobs.newJobs(MockJobs.java:122)
	at org.apache.hadoop.mapreduce.v2.hs.MockHistoryJobs.newHistoryJobs(MockHistoryJobs.java:54)
	at org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext.<init>(MockHistoryContext.java:43)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery$1.configureServlets(TestHsWebServicesJobsQuery.java:77)
	at com.google.inject.servlet.ServletModule.configure(ServletModule.java:53)
	at com.google.inject.AbstractModule.configure(AbstractModule.java:59)
	at com.google.inject.spi.Elements$RecordingBinder.install(Elements.java:223)
	at com.google.inject.spi.Elements.getElements(Elements.java:101)
	at com.google.inject.internal.InjectorShell$Builder.build(InjectorShell.java:133)
	at com.google.inject.internal.InternalInjectorCreator.build(InternalInjectorCreator.java:103)
	at com.google.inject.Guice.createInjector(Guice.java:95)
	at com.google.inject.Guice.createInjector(Guice.java:72)
	at com.google.inject.Guice.createInjector(Guice.java:62)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:73)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeInvalidformat

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStateInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeEndInvalidformat

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeEnd

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeInvalidformat

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryLimit

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryQueue

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStateNone

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeNegative

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeBeginEndInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryUserNone

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeBeginEndInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeBeginNegative

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeBeginEnd

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryLimitInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryQueueNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeEndInvalidformat

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryStartTimeBegin

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryUser

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeBeginEnd

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeEndNegative

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeBegin

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.testJobsQueryFinishTimeEnd

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery.<init>(TestHsWebServicesJobsQuery.java:109)



Hadoop-Mapreduce-trunk - Build # 2556 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2556/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 35554 lines...]
Running org.apache.hadoop.mapred.TestClientRedirect
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.15 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 95.487 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 31.915 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.942 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.781 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:07 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.244 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.381 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:29 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:34 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:05 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:25 h
[INFO] Finished at: 2015-11-02T07:10:33+00:00
[INFO] Final Memory: 35M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12047
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)
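
The three errors above are completion timeouts rather than functional assertion failures: the job submitted by the test simply did not finish before a fixed deadline (30 seconds in the IOException message from UtilsForTests.runJobSucceed, 150 seconds for the JUnit-level timeout in TestMRJobsWithProfiler). The snippet below is a hedged sketch of that wait-with-deadline pattern against the public org.apache.hadoop.mapreduce.Job API; it is not the actual UtilsForTests code, and the JobWaiter class name is invented for illustration.

    import java.io.IOException;

    import org.apache.hadoop.mapreduce.Job;

    // Hypothetical helper sketching the polling loop implied by the
    // "Job didn't finish in 30 seconds" message above.
    public final class JobWaiter {
        private JobWaiter() {
        }

        public static void waitForCompletion(Job job, long timeoutMillis)
                throws IOException, InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (!job.isComplete()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new IOException("Job didn't finish in "
                        + (timeoutMillis / 1000) + " seconds");
                }
                Thread.sleep(1000); // poll roughly once per second
            }
        }
    }

On a loaded build slave such a deadline is easy to overshoot even when nothing is functionally wrong, which is why these timeouts tend to show up as intermittent failures.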



Hadoop-Mapreduce-trunk - Build # 2555 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2555/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32278 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.291 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.743 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.811 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.02 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.503 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:629->assertJobState:1012 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 340, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.922 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:07 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.046 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.389 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:43 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:39 min
[INFO] Finished at: 2015-11-02T01:25:57+00:00
[INFO] Final Memory: 45M/1269M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9343
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:1012)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:629)
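
Unlike the port and timeout problems above, this TestJobImpl failure is an ordinary assertion mismatch: the job under test ended in the ERROR state where the test expected SUCCEEDED. The lines below are a hedged reconstruction of what an assertJobState-style check generally looks like; the real helper in TestJobImpl may differ, and the JobStateAsserts class name is invented here.

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.mapreduce.v2.api.records.JobState;
    import org.apache.hadoop.mapreduce.v2.app.job.Job;

    // Hypothetical helper illustrating why JUnit prints
    // "expected:<SUCCEEDED> but was:<ERROR>" in the report above.
    public final class JobStateAsserts {
        private JobStateAsserts() {
        }

        public static void assertJobState(Job job, JobState expected) {
            // assertEquals renders a mismatch of the two enum values exactly
            // as the "expected:<...> but was:<...>" message shown above.
            assertEquals(expected, job.getState());
        }
    }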



Hadoop-Mapreduce-trunk - Build # 2554 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2554/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32793 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 185.768 sec - in org.apache.hadoop.mapreduce.TestMRJobClient
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.214 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.018 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.643 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.256 sec - in org.apache.hadoop.util.TestMRCJCRunJar

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.779 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.860 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.914 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:15 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:41 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-10-31T12:32:07+00:00
[INFO] Final Memory: 34M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-4937
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2553 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2553/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32266 lines...]
Tests run: 20, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.807 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.404 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestBlocks
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.217 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.245 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.202 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.878 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.144 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.865 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Tests run: 339, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.190 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:49 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.553 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.430 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:44 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:16 min
[INFO] Finished at: 2015-10-31T06:09:36+00:00
[INFO] Final Memory: 39M/812M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefirebooter6096526959654697969.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire437083167251354693tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire_71778419126656905943tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed

Hadoop-Mapreduce-trunk - Build # 2552 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2552/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 35582 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.611 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 98.384 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 37.588 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.621 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestMiniMRClientCluster.testRestart:114 » YarnRuntime org.apache.hadoop.yarn.e...
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 4, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.957 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:16 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 39.145 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.890 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [11:02 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:29 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:03 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:23 h
[INFO] Finished at: 2015-10-31T03:18:17+00:00
[INFO] Final Memory: 36M/981M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-6451
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories
	at org.apache.hadoop.fs.RawLocalFileSystem.listStatus(RawLocalFileSystem.java:460)
	at org.apache.hadoop.fs.DelegateToFileSystem.listStatus(DelegateToFileSystem.java:168)
	at org.apache.hadoop.fs.ChecksumFs.listStatus(ChecksumFs.java:521)
	at org.apache.hadoop.fs.AbstractFileSystem$1.<init>(AbstractFileSystem.java:890)
	at org.apache.hadoop.fs.AbstractFileSystem.listStatusIterator(AbstractFileSystem.java:888)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:211)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.restart(MiniMRYarnClusterAdapter.java:73)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:114)
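
The stack trace above fails while the job history manager scans its local done-directory through FileContext. For reference, a minimal, self-contained sketch of that same listing call chain, assuming a local path of your choosing (this is not the HistoryFileManager code itself, and the directory argument is only an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class ListHistoryDirSketch {
  // Walk a local directory through FileContext, the same API the
  // failing HistoryFileManager.initExisting call chain goes through.
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileContext fc = FileContext.getLocalFSFileContext(conf);
    Path dir = new Path(args.length > 0 ? args[0] : "/tmp");  // example path, not the real done-dir
    RemoteIterator<FileStatus> it = fc.listStatus(dir);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}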


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)
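
The 150000 ms figure above is a per-test timeout: JUnit 4 interrupts the test thread once the method has run that long, which is why the failure surfaces inside Thread.sleep under Job.monitorAndPrintJob. A minimal sketch of how such a limit is declared (the class and the job helper below are hypothetical, not the actual TestMRJobsWithProfiler code):

import static org.junit.Assert.assertTrue;
import org.junit.Test;

public class TimeoutSketchTest {
  // JUnit 4 fails the test with "test timed out after ... milliseconds"
  // if the method runs longer than the declared limit.
  @Test(timeout = 150000)
  public void jobFinishesWithinLimit() throws Exception {
    boolean finished = runHypotheticalJobAndWait();  // stand-in for job submission + waitForCompletion
    assertTrue("job did not finish", finished);
  }

  private boolean runHypotheticalJobAndWait() throws InterruptedException {
    Thread.sleep(1000);  // placeholder work
    return true;
  }
}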


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)
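
Both timeline-handling failures above come from the same guard in the test utilities: the submitted job is polled and the test gives up with an IOException after a fixed deadline instead of hanging the build. A minimal sketch of that bounded-wait pattern against the public org.apache.hadoop.mapreduce.Job API (the helper class, deadline, and poll interval are illustrative assumptions, not the actual UtilsForTests.runJobSucceed code):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public final class JobDeadlineSketch {
  // Poll the running job and fail with an IOException if it has not
  // completed within the given number of milliseconds.
  public static void waitForJob(Job job, long deadlineMs)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    while (!job.isComplete()) {
      if (System.currentTimeMillis() - start > deadlineMs) {
        throw new IOException("Job didn't finish in " + (deadlineMs / 1000) + " seconds");
      }
      Thread.sleep(1000);  // re-check roughly once a second
    }
  }
}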



Hadoop-Mapreduce-trunk - Build # 2551 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2551/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32792 lines...]
Running org.apache.hadoop.mapreduce.v2.TestUberAM
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 362.066 sec - in org.apache.hadoop.mapreduce.v2.TestUberAM
Running org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 65.944 sec - in org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 61.674 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.TestValueIterReset
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.531 sec - in org.apache.hadoop.mapreduce.TestValueIterReset
Running org.apache.hadoop.mapreduce.TestMapReduceLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 145.144 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 505, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.243 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:53 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.083 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.499 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:52 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:48 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:07 h
[INFO] Finished at: 2015-10-30T21:39:01+00:00
[INFO] Final Memory: 36M/982M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter4737110294281101753.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire744886513436284046tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2358966630585191508830tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12133
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2550 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2550/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32790 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.362 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.576 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.491 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.718 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.825 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.078 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.050 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:22 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-10-30T18:23:12+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-6528
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2549 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2549/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32273 lines...]
Running org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.378 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.686 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.654 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.414 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.931 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:629->assertJobState:1012 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 340, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.832 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.685 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.978 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:17 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:37 min
[INFO] Finished at: 2015-10-30T15:10:45+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-4937
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:1012)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:629)
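
The "expected:<SUCCEEDED> but was:<ERROR>" text above is the standard JUnit message for an equality assertion on the job's final state. A minimal sketch of that kind of check (the enum and the job helper below are hypothetical stand-ins, not the actual TestJobImpl/assertJobState code):

import static org.junit.Assert.assertEquals;
import org.junit.Test;

public class JobStateAssertionSketch {
  // Hypothetical stand-in for the MapReduce v2 JobState values.
  enum JobState { NEW, RUNNING, SUCCEEDED, ERROR }

  @Test
  public void finalStateIsSucceeded() {
    JobState actual = finishHypotheticalJob();
    // Produces "expected:<SUCCEEDED> but was:<ERROR>" when the job
    // ends in ERROR instead of SUCCEEDED.
    assertEquals(JobState.SUCCEEDED, actual);
  }

  private JobState finishHypotheticalJob() {
    return JobState.SUCCEEDED;  // placeholder outcome
  }
}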



Hadoop-Mapreduce-trunk - Build # 2548 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2548/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32794 lines...]
Running org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.662 sec - in org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Running org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.721 sec - in org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.344 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.426 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.018 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:44 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.552 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.848 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:13 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:34 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:57 h
[INFO] Finished at: 2015-10-30T12:48:03+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4320
Updating YARN-4312
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)



Hadoop-Mapreduce-trunk - Build # 2547 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2547/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32290 lines...]
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)

Running org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.41 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.738 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 340, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.005 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.049 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.918 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:51 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:13 min
[INFO] Finished at: 2015-10-30T09:00:34+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-3580
Updating HDFS-9323
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:476)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)
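
The RuntimeException above is raised once the timeline client has exhausted its connection retries; the stack trace shows the retry filter wrapping each HTTP POST to the timeline server. A minimal sketch of that retry-until-limit pattern in plain Java (the retry count, backoff, and Call interface are illustrative assumptions, not the TimelineClientImpl internals):

import java.io.IOException;

public final class RetryUntilLimitSketch {
  // Functional interface for the connection-sensitive operation being retried.
  public interface Call<T> {
    T run() throws IOException;
  }

  // Retry the operation a bounded number of times, sleeping between
  // attempts, then surface a RuntimeException once the limit is exceeded.
  public static <T> T retry(int maxRetries, long backoffMs, Call<T> call)
      throws InterruptedException {
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return call.run();
      } catch (IOException e) {
        if (attempt == maxRetries) {
          throw new RuntimeException("Failed to connect to timeline server. "
              + "Connection retries limit exceeded. The posted timeline event may be missing", e);
        }
        Thread.sleep(backoffMs);  // wait before the next attempt
      }
    }
    throw new AssertionError("unreachable");
  }
}

A caller would wrap the actual HTTP post inside Call.run(); when the limit is hit, the last IOException is kept as the cause, much as the retries in the trace above end in a single RuntimeException.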



Hadoop-Mapreduce-trunk - Build # 2546 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2546/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32288 lines...]

Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.676 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.591 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.406 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.913 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 340, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.152 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.290 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.902 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:50 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:09 min
[INFO] Finished at: 2015-10-30T01:38:12+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4313
Updating YARN-4127
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2545 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2545/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32291 lines...]
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)

Running org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.389 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.683 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 340, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.893 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.075 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.790 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:39 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:57 min
[INFO] Finished at: 2015-10-29T23:08:49+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4310
Updating YARN-4183
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2544 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2544/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32289 lines...]
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.783 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.749 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.452 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.16 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 340, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.153 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:47 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.648 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.043 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:00 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:27 min
[INFO] Finished at: 2015-10-29T20:53:34+00:00
[INFO] Final Memory: 38M/716M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-6515
Updating HDFS-9229
Updating HDFS-9332
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2543 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2543/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32290 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 24.868 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.794 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.35 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.089 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.535 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 339, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.180 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:12 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.914 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.629 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [11:37 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 14:39 min
[INFO] Finished at: 2015-10-29T18:33:48+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-2902
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)
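
The RuntimeException above is the end state of the timeline client's bounded retry loop: each attempt to post the entity fails, the client waits and retries, and once the retry budget is spent it gives up and rethrows with the "Connection retries limit exceeded" message. The sketch below is a minimal, self-contained version of that pattern; the retryOn helper, its parameters, and the failing Callable are illustrative assumptions, not the actual TimelineClientImpl internals.

    // Illustrative sketch only; not the real TimelineClientImpl code.
    // Models a bounded retry loop that rethrows once its budget is spent,
    // which is what produces the "Connection retries limit exceeded" error above.
    import java.io.IOException;
    import java.util.concurrent.Callable;

    public class BoundedRetrySketch {

        static <T> T retryOn(Callable<T> op, int maxRetries, long retryIntervalMs) {
            int attempt = 0;
            while (true) {
                try {
                    return op.call();                  // try to post the timeline entity
                } catch (Exception e) {
                    attempt++;
                    if (attempt > maxRetries) {        // retry budget exhausted: give up
                        throw new RuntimeException(
                            "Failed to connect to timeline server. "
                            + "Connection retries limit exceeded. "
                            + "The posted timeline event may be missing", e);
                    }
                    try {
                        Thread.sleep(retryIntervalMs); // back off before the next attempt
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(ie);
                    }
                }
            }
        }

        public static void main(String[] args) {
            // With nothing listening on the timeline address, every attempt fails
            // and the loop surfaces the same RuntimeException seen in the test.
            retryOn(() -> { throw new IOException("connection refused"); }, 3, 10);
        }
    }

In a test run, exhausting the retries usually means the in-process timeline server never became reachable (it did not start, or its port was taken), rather than a problem in JobHistoryEventHandler itself.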



Hadoop-Mapreduce-trunk - Build # 2542 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2542/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32294 lines...]
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)


Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 339, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.851 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.175 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.004 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:45 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:06 min
[INFO] Finished at: 2015-10-29T08:02:05+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4288
Updating YARN-4130
Updating HDFS-9261
Updating HDFS-8545
Updating HDFS-9044
Updating HADOOP-12327
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2541 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2541/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33113 lines...]
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 339, Failures: 0, Errors: 37, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.199 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.010 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.890 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:22 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:41 min
[INFO] Finished at: 2015-10-29T05:32:43+00:00
[INFO] Final Memory: 38M/716M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
37 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)
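
Every TestAMWebServices* failure in this report dies the same way: the JerseyTest constructor asks the Grizzly test container to bind its HTTP port, the port is already occupied on the build slave, and the constructor fails with a TestContainerException wrapping java.net.BindException before any test body runs. The effect is reproducible with plain sockets; the sketch below assumes nothing about the Jersey test framework, and the port number is an arbitrary example.

    // Minimal sketch: binding the same fixed port twice yields the same
    // "java.net.BindException: Address already in use" seen in these traces.
    import java.net.ServerSocket;

    public class FixedPortClashSketch {
        public static void main(String[] args) throws Exception {
            int fixedPort = 9998; // arbitrary fixed port, standing in for a test-container default
            try (ServerSocket first = new ServerSocket(fixedPort)) {
                // The second bind to the same port fails immediately with
                // java.net.BindException: Address already in use
                try (ServerSocket second = new ServerSocket(fixedPort)) {
                    System.out.println("unreachable: " + second.getLocalPort());
                }
            }
        }
    }

The usual ways out are to let the container bind an ephemeral port (port 0) or to make sure a leftover process from an earlier run is not still holding the port on the Jenkins slave.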


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfo

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAM

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidAccept

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodesXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodes

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid3

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptId

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttempts

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdXMLCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdBogus

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConf

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)
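
The TestContainerException entries above all wrap the same root cause: the Grizzly test container used by these Jersey-based web-service tests tries to bind an HTTP port that another process (or a leftover test server) on the build slave is still holding. As a generic illustration only, and not the actual Hadoop or Jersey test code, the sketch below shows one common way to avoid such collisions: ask the OS for a free ephemeral port immediately before starting a test server instead of reusing a fixed default port. The class and method names here are made up for this example.

    import java.io.IOException;
    import java.net.ServerSocket;

    public class FreePortFinder {
        // Bind to port 0 so the kernel assigns an unused port, then release it.
        // A small race remains between closing this socket and the test server
        // binding the port, but it avoids most "Address already in use" failures
        // when several test JVMs run on the same host.
        public static int findFreePort() throws IOException {
            try (ServerSocket socket = new ServerSocket(0)) {
                socket.setReuseAddress(true);
                return socket.getLocalPort();
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println("Free test port: " + findFreePort());
        }
    }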


FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2540 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2540/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32368 lines...]
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.137 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.023 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.951 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.729 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.948 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 339, Failures: 0, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  5.900 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:54 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 35.918 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  8.121 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:28 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:15 min
[INFO] Finished at: 2015-10-29T00:22:06+00:00
[INFO] Final Memory: 41M/826M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9168
Updating HDFS-9295
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)



Hadoop-Mapreduce-trunk - Build # 2539 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2539/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32309 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.996 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.17 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.885 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:627->assertJobState:977 expected:<SUCCEEDED> but was:<ERROR>

Tests in error: 
  TestJobHistoryEventHandler.testTimelineEventHandling:506->handleEvent:718 » Runtime

Tests run: 339, Failures: 1, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.207 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:52 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.363 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.347 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:31 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:06 min
[INFO] Finished at: 2015-10-28T21:11:25+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-2859
Updating HDFS-9279
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing

Stack Trace:
java.lang.RuntimeException: Failed to connect to timeline server. Connection retries limit exceeded. The posted timeline event may be missing
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineClientConnectionRetry.retryOn(TimelineClientImpl.java:206)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$TimelineJerseyRetryFilter.handle(TimelineClientImpl.java:245)
	at com.sun.jersey.api.client.Client.handle(Client.java:648)
	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingObject(TimelineClientImpl.java:474)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:323)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl$1.run(TimelineClientImpl.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPosting(TimelineClientImpl.java:320)
	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:305)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processEventForTimelineServer(JobHistoryEventHandler.java:1015)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:586)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.handleEvent(TestJobHistoryEventHandler.java:718)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:506)


FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:977)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:627)



Hadoop-Mapreduce-trunk - Build # 2538 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2538/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 35727 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 142.842 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf909.gq1.ygridcore.net:0 is different from new address: asf909.gq1.ygridcore.net:50078 expected:<...9.gq1.ygridcore.net:[0]> but was:<...9.gq1.ygridcore.net:[50078]>

Tests in error: 
  TestJobCounters.testHeapUsageCounter:700->getTaskCounterUsage:578 » IO java.ne...
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 6, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.453 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:53 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.163 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.290 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:47 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:52 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:59 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:17 h
[INFO] Finished at: 2015-10-28T19:47:03+00:00
[INFO] Final Memory: 34M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12519
Updating HDFS-9255
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter

Error Message:
java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getTaskReports(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskReports(MRClientProtocolPBClientImpl.java:188)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskReports(ClientServiceDelegate.java:453)
	at org.apache.hadoop.mapred.YARNRunner.getTaskReports(YARNRunner.java:619)
	at org.apache.hadoop.mapreduce.Job$3.run(Job.java:538)
	at org.apache.hadoop.mapreduce.Job$3.run(Job.java:536)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskReports(Job.java:536)
	at org.apache.hadoop.mapred.JobClient.getTaskReports(JobClient.java:666)
	at org.apache.hadoop.mapred.JobClient.getMapTaskReports(JobClient.java:656)
	at org.apache.hadoop.mapred.TestJobCounters.getTaskCounterUsage(TestJobCounters.java:578)
	at org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter(TestJobCounters.java:700)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf909.gq1.ygridcore.net:0 is different from new address: asf909.gq1.ygridcore.net:50078 expected:<...9.gq1.ygridcore.net:[0]> but was:<...9.gq1.ygridcore.net:[50078]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf909.gq1.ygridcore.net:0 is different from new address: asf909.gq1.ygridcore.net:50078 expected:<...9.gq1.ygridcore.net:[0]> but was:<...9.gq1.ygridcore.net:[50078]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)
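
The comparison failure above is a port-zero artifact: the address captured before the restart still shows the configured placeholder port 0, while the address after the restart shows the port the OS actually assigned (50078). As a generic JDK-only illustration, and not the MiniMRClientCluster code, the sketch below shows that a server configured with port 0 only learns its real port at bind time, so a restart check has to compare bound addresses rather than configured ones.

    import com.sun.net.httpserver.HttpServer;
    import java.io.IOException;
    import java.net.InetSocketAddress;

    public class EphemeralPortExample {
        public static void main(String[] args) throws IOException {
            // Configured port is 0; the operating system picks the real port at bind time.
            HttpServer server = HttpServer.create(new InetSocketAddress(0), 0);
            server.start();
            int boundPort = server.getAddress().getPort();
            System.out.println("configured port = 0, bound port = " + boundPort);
            server.stop(0);
        }
    }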


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf909.gq1.ygridcore.net/67.195.81.153 to asf909.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)



Hadoop-Mapreduce-trunk - Build # 2537 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2537/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32273 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.167 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.362 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.008 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.026 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.253 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.83 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Tests run: 313, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.311 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:52 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.600 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.650 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:15 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 10:51 min
[INFO] Finished at: 2015-10-28T13:16:38+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefirebooter2937225660422046598.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire6873355845888788230tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire_836160620522609844361tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4251
Updating HADOOP-12385
Updating HADOOP-12040
Updating HADOOP-12514
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed

Hadoop-Mapreduce-trunk - Build # 2536 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2536/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32287 lines...]
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.927 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.356 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.622 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestMRApp.testUpdatedNodes:262 Expecting 2 more completion events for killed expected:<4> but was:<2>

Tests run: 339, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.956 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:16 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 38.328 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.904 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:55 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 14:02 min
[INFO] Finished at: 2015-10-28T09:09:42+00:00
[INFO] Final Memory: 44M/1249M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9302
Updating HDFS-9317
Updating HADOOP-11685
Updating HDFS-9311
Updating HDFS-9231
Updating HADOOP-12520
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes

Error Message:
Expecting 2 more completion events for killed expected:<4> but was:<2>

Stack Trace:
java.lang.AssertionError: Expecting 2 more completion events for killed expected:<4> but was:<2>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes(TestMRApp.java:262)



Hadoop-Mapreduce-trunk - Build # 2535 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2535/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32989 lines...]
Tests in error: 
  TestJobCleanup.tearDown:91 » NoClassDefFound org/apache/hadoop/service/Service...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestJoinDatamerge$1>TestSetup.run:27->setUp:48 » NoClassDefFound org/apache/ha...
  TestMRKeyFieldBasedComparator.testBasicUnixComparator:98->testComparator:68 » NoClassDefFound
  TestMultithreadedMapper.testRuntimeExRun:44->run:69 » NoClassDefFound org/apac...
  TestMultithreadedMapper.testOKRun:37->run:69 » NoClassDefFound org/apache/hado...
  TestMultithreadedMapper.testIOExRun:41->run:69 » NoClassDefFound org/apache/ha...

Tests run: 517, Failures: 1, Errors: 7, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.831 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.350 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.068 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:24 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:40 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:37 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:54 h
[INFO] Finished at: 2015-10-27T22:47:54+00:00
[INFO] Final Memory: 34M/607M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter42617367170533509.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire5689050454417925682tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_3025601103130147053820tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-6412
Updating HDFS-9307
Updating HDFS-9259
Updating MAPREDUCE-6416
Updating MAPREDUCE-6391
Updating MAPREDUCE-6435
Updating HADOOP-12494
Updating HDFS-9299
Updating HADOOP-12178
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobCleanup.org.apache.hadoop.mapred.TestJobCleanup

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.TestJobCleanup.tearDown(TestJobCleanup.java:91)


FAILED:  org.apache.hadoop.mapreduce.lib.join.TestJoinDatamerge$1.org.apache.hadoop.mapreduce.lib.join.TestJoinDatamerge

Error Message:
org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager$SerializerCompat

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager$SerializerCompat
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.<init>(DelegationTokenSecretManager.java:70)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.createDelegationTokenSecretManager(FSNamesystem.java:5672)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:832)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:703)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1005)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:366)
	at org.apache.hadoop.hdfs.DFSTestUtil.formatNameNode(DFSTestUtil.java:228)
	at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1005)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:888)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:820)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:438)
	at org.apache.hadoop.mapreduce.lib.join.TestJoinDatamerge$1.setUp(TestJoinDatamerge.java:48)


FAILED:  org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testRuntimeExRun

Error Message:
org/apache/hadoop/yarn/util/Apps

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.run(TestMultithreadedMapper.java:69)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testRuntimeExRun(TestMultithreadedMapper.java:44)


FAILED:  org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testOKRun

Error Message:
org/apache/hadoop/mapreduce/v2/util/MRApps

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapreduce/v2/util/MRApps
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.run(TestMultithreadedMapper.java:69)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testOKRun(TestMultithreadedMapper.java:37)


FAILED:  org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testIOExRun

Error Message:
org/apache/hadoop/mapreduce/v2/util/MRApps

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapreduce/v2/util/MRApps
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.run(TestMultithreadedMapper.java:69)
	at org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper.testIOExRun(TestMultithreadedMapper.java:41)


FAILED:  org.apache.hadoop.mapreduce.lib.partition.TestMRKeyFieldBasedComparator.testBasicUnixComparator

Error Message:
org/apache/hadoop/yarn/exceptions/YarnRuntimeException

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnRuntimeException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at org.apache.hadoop.mapreduce.lib.partition.TestMRKeyFieldBasedComparator.testComparator(TestMRKeyFieldBasedComparator.java:68)
	at org.apache.hadoop.mapreduce.lib.partition.TestMRKeyFieldBasedComparator.testBasicUnixComparator(TestMRKeyFieldBasedComparator.java:98)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf905.gq1.ygridcore.net:0 is different from new address: asf905.gq1.ygridcore.net:44827 expected:<...5.gq1.ygridcore.net:[0]> but was:<...5.gq1.ygridcore.net:[44827]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf905.gq1.ygridcore.net:0 is different from new address: asf905.gq1.ygridcore.net:44827 expected:<...5.gq1.ygridcore.net:[0]> but was:<...5.gq1.ygridcore.net:[44827]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf905.gq1.ygridcore.net/67.195.81.149 to asf905.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf905.gq1.ygridcore.net/67.195.81.149 to asf905.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2534 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2534/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 38875 lines...]
Running org.apache.hadoop.mapred.TestLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 137.541 sec - in org.apache.hadoop.mapred.TestLazyOutput
Running org.apache.hadoop.mapred.TestSequenceFileInputFilter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.906 sec - in org.apache.hadoop.mapred.TestSequenceFileInputFilter
Running org.apache.hadoop.mapred.TestMultipleLevelCaching
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.TestMultipleLevelCaching
Running org.apache.hadoop.mapred.TestJobCleanup

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45853 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45853]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMRJobs.testContainerRollingLog:652 »  test timed out after 120000 millisec...

Tests run: 423, Failures: 1, Errors: 1, Skipped: 9

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.790 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:50 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.031 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.664 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:37 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:43 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:47 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:05 h
[INFO] Finished at: 2015-10-27T18:50:07+00:00
[INFO] Final Memory: 41M/612M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter6938520658650070540.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire4539190232445715093tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_1897352922868065595454tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4302
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45853 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45853]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45853 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45853]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1509)
	at org.apache.hadoop.ipc.Client.call(Client.java:1424)
	at org.apache.hadoop.ipc.Client.call(Client.java:1385)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2533 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2533/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36710 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.616 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Failed tests: 
  TestSpeculativeExecution.testSpeculativeExecution:250 null
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:45174 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[45174]>

Tests in error: 
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 522, Failures: 2, Errors: 9, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.106 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:16 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 43.784 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.149 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:44 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:24 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:18 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:39 h
[INFO] Finished at: 2015-10-27T14:47:46+00:00
[INFO] Final Memory: 39M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12515
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
11 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.testSpeculativeExecution

Error Message:
null

Stack Trace:
java.lang.AssertionError: null
	at org.junit.Assert.fail(Assert.java:86)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.junit.Assert.assertTrue(Assert.java:52)
	at org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.testSpeculativeExecution(TestSpeculativeExecution.java:250)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:45174 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[45174]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:45174 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[45174]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)
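
The ComparisonFailure above is a plain JUnit assertEquals that compares the service address recorded before the mini cluster restart with the address reported afterwards; the ":0" on the expected side indicates the pre-restart address was captured while the service was still on an unbound port. The sketch below only illustrates that assertion shape under assumed names (FakeCluster, getServiceAddress() and restart() are hypothetical stand-ins, not the actual TestMiniMRClientCluster code).

    import static org.junit.Assert.assertEquals;

    import org.junit.Before;
    import org.junit.Test;

    public class RestartAddressCheck {

      // Hypothetical stand-in for the mini-cluster handle used by the real test.
      static class FakeCluster {
        private final int port = 45174;                 // pretend the service is already bound
        String getServiceAddress() { return "localhost:" + port; }
        void restart() { /* a healthy restart preserves the advertised address */ }
      }

      private FakeCluster cluster;

      @Before
      public void setUp() {
        cluster = new FakeCluster();
      }

      @Test
      public void testRestartKeepsAddress() throws Exception {
        // In the failing run the value captured here was "host:0", i.e. taken
        // before the service had bound to a real port.
        String before = cluster.getServiceAddress();
        cluster.restart();
        String after = cluster.getServiceAddress();
        // Same assertion shape as the failure message above: message, expected, actual.
        assertEquals("Address before restart: " + before
            + " is different from new address: " + after, before, after);
      }
    }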


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
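
For reference, the stack above traces the normal client path for counter retrieval: Job.getCounters() delegates through YARNRunner and ClientServiceDelegate to the MRClientProtocol getCounters RPC, and it is that connection (to port 0 here) which is refused. A minimal sketch of the same client-side call sequence follows; it assumes a cluster configuration is available on the classpath, and the job id is a hypothetical placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobID;

    public class FetchCounters {
      public static void main(String[] args) throws Exception {
        // Loads mapred-site.xml / yarn-site.xml from the classpath, if present.
        Configuration conf = new Configuration();
        Cluster cluster = new Cluster(conf);

        // Hypothetical job id; in the test the job has just been submitted and run.
        Job job = cluster.getJob(JobID.forName("job_1445900000000_0001"));
        if (job != null) {
          // This call ends in the getCounters RPC that the stack trace above
          // shows failing with a ConnectException.
          Counters counters = job.getCounters();
          System.out.println(counters);
        }
      }
    }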



Hadoop-Mapreduce-trunk - Build # 2532 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2532/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33391 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.512 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.933 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.371 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.255 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:44103 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[44103]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.641 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.803 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.871 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:14 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:40 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:45 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-27T09:34:49+00:00
[INFO] Final Memory: 38M/981M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-3573
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:44103 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[44103]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:44103 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[44103]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2531 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2531/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32842 lines...]
Running org.apache.hadoop.mapreduce.jobhistory.TestEvents
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.674 sec - in org.apache.hadoop.mapreduce.jobhistory.TestEvents
Running org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.395 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobSummary
Running org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.985 sec - in org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler

Results :

Tests run: 336, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.778 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.844 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.821 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:08 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:26 min
[INFO] Finished at: 2015-10-27T03:58:50+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefirebooter8399627137917136278.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire8232022286778609999tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire_1021788888447251697902tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9292
Updating HDFS-9291
Updating YARN-4169
Updating YARN-3216
Updating HADOOP-12457
Updating HDFS-9305
Updating HDFS-8945
Updating YARN-4300
Updating HDFS-9284
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed

Hadoop-Mapreduce-trunk - Build # 2530 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2530/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33795 lines...]
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:54984 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[54984]>

Tests in error: 
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...

Tests run: 523, Failures: 1, Errors: 7, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.809 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.386 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.332 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:19 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:38 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-27T00:11:22+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12472
Updating YARN-4285
Updating YARN-4284
Updating HDFS-9304
Updating HDFS-9268
Updating HDFS-7284
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor81.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor81.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)
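
Every trace of this shape in the report ends in the same raw condition: the IPC client is asked to connect to asf904.gq1.ygridcore.net:0, no listener can exist on port 0, and the connect is refused. A target port of 0 suggests the client resolved an application-master or history address before it was replaced with the real bound port; that reading is an inference from the log, not a confirmed root cause. The standalone sketch below (illustrative only, not Hadoop code) reproduces just the java.net.ConnectException at the bottom of these traces.

    import java.io.IOException;
    import java.net.ConnectException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;
    import java.net.Socket;

    // Minimal reproduction of the failure underneath the Hadoop IPC frames:
    // a TCP connect to a port with no listener fails with
    // java.net.ConnectException: Connection refused.
    public class ConnectionRefusedDemo {
        public static void main(String[] args) throws IOException {
            // Reserve an ephemeral port, then close the listener so the port
            // is unbound again (barring a race with another process).
            int unusedPort;
            try (ServerSocket ss = new ServerSocket(0)) {
                unusedPort = ss.getLocalPort();
            }

            // This connect fails the same way Client$Connection.setupConnection
            // does in the traces above when handed an address nobody serves.
            try (Socket s = new Socket()) {
                s.connect(new InetSocketAddress("localhost", unusedPort), 1000);
            } catch (ConnectException e) {
                System.out.println("Expected failure: " + e.getMessage());
            }
        }
    }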


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:54984 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[54984]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:54984 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[54984]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)
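
This one is an equality assertion rather than a connection error: the address recorded before the restart still carries port 0, so it cannot match the concrete port 54984 reported afterwards. The fragment below only sketches the shape of that check, with the two values copied from the message, to show how JUnit 4 renders the mismatch as expected:<...net:[0]> but was:<...net:[54984]>; it is not the real TestMiniMRClientCluster code.

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class RestartAddressCheckSketch {
        // Stand-in values copied from the ComparisonFailure message above.
        private final String before = "asf904.gq1.ygridcore.net:0";
        private final String after = "asf904.gq1.ygridcore.net:54984";

        @Test
        public void addressSurvivesRestart() {
            // With these inputs the assertion fails and JUnit's
            // ComparisonFailure prints the bracketed diff quoted above.
            assertEquals("Address before restart: " + before
                    + " is different from new address: " + after, before, after);
        }
    }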


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2529 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2529/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36620 lines...]

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:47640 expected:<...7.gq1.ygridcore.net:[]0> but was:<...7.gq1.ygridcore.net:[4764]0>

Tests in error: 
  TestMapReduceLazyOutput.testLazyOutput:137 » NoClassDefFound org/apache/hadoop...
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 10, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.758 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:19 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 38.713 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  7.178 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [11:27 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:55 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:03 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:24 h
[INFO] Finished at: 2015-10-26T17:33:08+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12513
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
11 tests failed.
FAILED:  org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/server/MiniYARNCluster

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:58)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput(TestMapReduceLazyOutput.java:137)
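
Unlike the connection failures, this is a classpath problem: MiniMRClientClusterFactory.create references org.apache.hadoop.yarn.server.MiniYARNCluster and that class is not visible to the forked test JVM (it normally comes from the hadoop-yarn-server-tests test jar, so a missing or stale dependency in this surefire run is the usual suspect, though the log alone does not prove it). A quick standalone probe, not part of the test suite, for checking whether the class is on a given classpath:

    // Run with the same classpath as the failing surefire fork to see whether
    // MiniYARNCluster is visible and, if so, which jar it is loaded from.
    public class ClasspathProbe {
        public static void main(String[] args) {
            String name = "org.apache.hadoop.yarn.server.MiniYARNCluster";
            try {
                Class<?> c = Class.forName(name, false,
                        ClasspathProbe.class.getClassLoader());
                System.out.println("found in: "
                        + c.getProtectionDomain().getCodeSource());
            } catch (ClassNotFoundException e) {
                // Class.forName reports absence as ClassNotFoundException; the
                // test instead sees NoClassDefFoundError because the class is
                // pulled in while another class is being defined.
                System.out.println("missing: " + name);
            }
        }
    }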


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)
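
Here the job simply never finished inside the test's budget: the "test timed out after 150000 milliseconds" exception is the one JUnit raises when a @Test(timeout=...) limit is exceeded, and the stack shown is where the test thread was parked at that moment, i.e. in Thread.sleep inside Job.monitorAndPrintJob while polling for completion. A minimal sketch of that mechanism, with a shortened timeout and not the real TestMRJobsWithProfiler code:

    import org.junit.Test;

    public class TimeoutSketch {
        // The real test allows 150000 ms; 2000 ms keeps the demo short.
        @Test(timeout = 2000)
        public void blocksUntilTimedOut() throws InterruptedException {
            // Stand-in for Job.waitForCompletion(): polling that never sees
            // completion keeps sleeping until the JUnit timeout fires and the
            // test fails with "test timed out after 2000 milliseconds".
            while (true) {
                Thread.sleep(500);
            }
        }
    }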


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:47640 expected:<...7.gq1.ygridcore.net:[]0> but was:<...7.gq1.ygridcore.net:[4764]0>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:47640 expected:<...7.gq1.ygridcore.net:[]0> but was:<...7.gq1.ygridcore.net:[4764]0>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)
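
A note on reading the bracketed message above: JUnit's ComparisonFailure trims the common prefix and suffix of the two compared strings and brackets only the part that differs, which is why the expected address renders as <...7.gq1.ygridcore.net:[]0> and the actual one as <...7.gq1.ygridcore.net:[4764]0> (the final shared "0" sits outside the brackets). The self-contained JUnit 4 snippet below is illustrative only (it is not the TestMiniMRClientCluster code); it feeds the same two address strings through assertEquals to show where that compacted form comes from:

    import org.junit.Assert;
    import org.junit.ComparisonFailure;

    // Illustrative only: reproduces the compacted diff format seen in the report above.
    public class JUnitDiffFormatSketch {
        public static void main(String[] args) {
            try {
                Assert.assertEquals("Address before restart differs from new address",
                        "asf907.gq1.ygridcore.net:0",
                        "asf907.gq1.ygridcore.net:47640");
            } catch (ComparisonFailure e) {
                // Prints something like:
                // Address before restart differs from new address
                // expected:<...7.gq1.ygridcore.net:[]0> but was:<...7.gq1.ygridcore.net:[4764]0>
                System.out.println(e.getMessage());
            }
        }
    }

The expected value's port is 0 because it was recorded before the restarted service had bound a real port, so it can never match the post-restart address.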


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
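
Every ConnectException in the failures above targets asf907.gq1.ygridcore.net:0, i.e. the address the ClientServiceDelegate ends up dialing still carries port 0, so no real AM/history-server port was ever filled in (see the ConnectionRefused wiki link in the messages). The sketch below is a hypothetical plain-Java guard, not Hadoop API: it only illustrates how making the "port is still 0" condition explicit turns a retried "Connection refused" into an immediate, clearer error.

    import java.net.InetSocketAddress;

    // Hypothetical helper, not part of Hadoop: reject an address whose port is
    // still 0 (the "not yet bound / not yet advertised" default) before any RPC.
    public final class AddressGuardSketch {
        static void requireUsable(InetSocketAddress addr) {
            if (addr.getPort() == 0) {
                throw new IllegalStateException("Service address has no real port yet: " + addr);
            }
        }

        public static void main(String[] args) {
            // Mirrors the failing calls above: the hostname is known but the port is still 0,
            // so this fails fast with IllegalStateException instead of connection retries.
            requireUsable(InetSocketAddress.createUnresolved("asf907.gq1.ygridcore.net", 0));
        }
    }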



Hadoop-Mapreduce-trunk - Build # 2528 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2528/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32386 lines...]
[INFO] Apache Hadoop MapReduce Core
[INFO] Apache Hadoop MapReduce Common
[INFO] Apache Hadoop MapReduce Shuffle
[INFO] Apache Hadoop MapReduce App
[INFO] Apache Hadoop MapReduce HistoryServer
[INFO] Apache Hadoop MapReduce JobClient
[INFO] Apache Hadoop MapReduce HistoryServer Plugins
[INFO] Apache Hadoop MapReduce NativeTask
[INFO] Apache Hadoop MapReduce Examples
[INFO] Apache Hadoop MapReduce
[INFO] 
[INFO] Using the builder org.apache.maven.lifecycle.internal.builder.singlethreaded.SingleThreadedBuilder with a thread count of 1
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building Apache Hadoop MapReduce Client 3.0.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
Downloading: https://repository.apache.org/content/repositories/snapshots/com/google/re2j/re2j/${re2j.version}/re2j-${re2j.version}.pom

Downloading: http://repository.jboss.org/nexus/content/groups/public/com/google/re2j/re2j/${re2j.version}/re2j-${re2j.version}.pom

Downloading: http://repo.maven.apache.org/maven2/com/google/re2j/re2j/${re2j.version}/re2j-${re2j.version}.pom

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... FAILURE [  0.966 s]
[INFO] Apache Hadoop MapReduce Core ...................... SKIPPED
[INFO] Apache Hadoop MapReduce Common .................... SKIPPED
[INFO] Apache Hadoop MapReduce Shuffle ................... SKIPPED
[INFO] Apache Hadoop MapReduce App ....................... SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 2.304 s
[INFO] Finished at: 2015-10-26T11:52:31+00:00
[INFO] Final Memory: 18M/723M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal on project hadoop-mapreduce-client: Could not resolve dependencies for project org.apache.hadoop:hadoop-mapreduce-client:pom:3.0.0-SNAPSHOT: Failed to collect dependencies at org.apache.hadoop:hadoop-common:jar:3.0.0-SNAPSHOT -> com.google.re2j:re2j:jar:${re2j.version}: Failed to read artifact descriptor for com.google.re2j:re2j:jar:${re2j.version}: Could not transfer artifact com.google.re2j:re2j:pom:${re2j.version} from/to apache.snapshots.https (https://repository.apache.org/content/repositories/snapshots): Illegal character in path at index 83: https://repository.apache.org/content/repositories/snapshots/com/google/re2j/re2j/${re2j.version}/re2j-${re2j.version}.pom -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/DependencyResolutionException
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
ERROR: Publisher 'Publish JUnit test result report' failed: No test report files were found. Configuration error?
Updating YARN-4223
Updating YARN-2729
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.
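
The dependency-resolution failure in this build comes from the literal, un-interpolated Maven property ${re2j.version} ending up in the artifact path: '{' and '}' are not legal URI path characters, which is exactly the "Illegal character in path at index 83" reported above, and an un-interpolated placeholder typically means the re2j.version property was not defined (or not inherited) when the hadoop-common POM was resolved. The standalone check below reproduces the URI error for the same URL using only the JDK:

    import java.net.URI;
    import java.net.URISyntaxException;

    // Reproduces the URI error from the console above: a literal ${...} placeholder
    // makes the download path an invalid URI ('{' is not a legal path character).
    public class PlaceholderUriSketch {
        public static void main(String[] args) {
            String url = "https://repository.apache.org/content/repositories/snapshots/"
                    + "com/google/re2j/re2j/${re2j.version}/re2j-${re2j.version}.pom";
            try {
                new URI(url);
            } catch (URISyntaxException e) {
                // Prints: Illegal character in path at index 83: https://repository.apache.org/...
                System.out.println(e.getMessage());
            }
        }
    }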

Hadoop-Mapreduce-trunk - Build # 2527 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2527/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33380 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.047 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.41 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.706 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf902.gq1.ygridcore.net:0 is different from new address: asf902.gq1.ygridcore.net:39047 expected:<...2.gq1.ygridcore.net:[0]> but was:<...2.gq1.ygridcore.net:[39047]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.800 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.259 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.910 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:21 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:00 h
[INFO] Finished at: 2015-10-26T11:07:59+00:00
[INFO] Final Memory: 35M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4246
Updating YARN-3528
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf902.gq1.ygridcore.net:0 is different from new address: asf902.gq1.ygridcore.net:39047 expected:<...2.gq1.ygridcore.net:[0]> but was:<...2.gq1.ygridcore.net:[39047]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf902.gq1.ygridcore.net:0 is different from new address: asf902.gq1.ygridcore.net:39047 expected:<...2.gq1.ygridcore.net:[0]> but was:<...2.gq1.ygridcore.net:[39047]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2526 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2526/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33383 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.554 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.049 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.779 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.702 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMiniMRClientCluster.testRestart:114 » YarnRuntime java.io.IOException: Res...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.816 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:39 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.918 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.950 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:21 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-26T08:29:24+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Error updating JIRA issues. Saving issues for next build.
java.lang.NullPointerException
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
java.io.IOException: ResourceManager failed to start. Final state is STOPPED

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.io.IOException: ResourceManager failed to start. Final state is STOPPED
	at org.apache.hadoop.yarn.server.MiniYARNCluster.startResourceManager(MiniYARNCluster.java:330)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$500(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceStart(MiniYARNCluster.java:456)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.restart(MiniMRYarnClusterAdapter.java:73)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2525 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2525/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33381 lines...]
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.099 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.055 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.645 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.25 sec - in org.apache.hadoop.util.TestMRCJCRunJar

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf905.gq1.ygridcore.net:0 is different from new address: asf905.gq1.ygridcore.net:60178 expected:<...5.gq1.ygridcore.net:[0]> but was:<...5.gq1.ygridcore.net:[60178]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.738 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.823 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.808 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:16 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:39 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:00 h
[INFO] Finished at: 2015-10-25T09:00:55+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-3738
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf905.gq1.ygridcore.net:0 is different from new address: asf905.gq1.ygridcore.net:60178 expected:<...5.gq1.ygridcore.net:[0]> but was:<...5.gq1.ygridcore.net:[60178]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf905.gq1.ygridcore.net:0 is different from new address: asf905.gq1.ygridcore.net:60178 expected:<...5.gq1.ygridcore.net:[0]> but was:<...5.gq1.ygridcore.net:[60178]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf905.gq1.ygridcore.net/67.195.81.149 to asf905.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf905.gq1.ygridcore.net/67.195.81.149 to asf905.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2524 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2524/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32838 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.118 sec - in org.apache.hadoop.mapreduce.v2.app.TestFail
Running org.apache.hadoop.mapreduce.v2.app.TestMRApp
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.682 sec - in org.apache.hadoop.mapreduce.v2.app.TestMRApp
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.765 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.508 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.14 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned:177->testTaskAttemptAssignedKilledHistory:388 No Ta Started JH Event

Tests run: 339, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.065 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:52 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.077 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.274 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:51 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:26 min
[INFO] Finished at: 2015-10-24T13:14:48+00:00
[INFO] Final Memory: 42M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4296
Updating YARN-4289
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned

Error Message:
No Ta Started JH Event

Stack Trace:
java.lang.AssertionError: No Ta Started JH Event
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testTaskAttemptAssignedKilledHistory(TestTaskAttempt.java:388)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned(TestTaskAttempt.java:177)



Hadoop-Mapreduce-trunk - Build # 2523 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2523/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33732 lines...]
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer

Tests run: 339, Failures: 0, Errors: 39, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.999 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:08 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.682 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.313 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:21 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:18 min
[INFO] Finished at: 2015-10-24T04:17:52+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-4015
Updating YARN-4294
Updating HDFS-9297
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
39 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)
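
This failure, and the identical ones reported below, all come from the same place: the stack trace shows the Grizzly test container being started from the test constructor (TestAMWebServices.<init> -> JerseyTest.<init> -> GrizzlyWebTestContainerFactory), and the bind fails because the port it asks for is already in use on the build host. As a minimal, self-contained sketch of that failure mode (illustrative JDK-only code, not taken from the Hadoop tests; the class name is made up), the snippet below grabs an OS-assigned ephemeral port by binding to port 0 and then shows that a second server asking for the same fixed port fails with exactly this BindException:

    import java.io.IOException;
    import java.net.BindException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class PortBindSketch {
        public static void main(String[] args) throws IOException {
            // Bind to port 0 so the OS hands back a currently free ephemeral port.
            try (ServerSocket holder = new ServerSocket(0)) {
                int port = holder.getLocalPort();
                System.out.println("OS assigned port " + port);

                // A second server asking for that same, now-occupied port fails the
                // same way the Grizzly test container does in the traces above.
                try (ServerSocket second = new ServerSocket()) {
                    second.setReuseAddress(false);
                    second.bind(new InetSocketAddress(port));
                } catch (BindException expected) {
                    System.out.println("Second bind failed: " + expected.getMessage());
                }
            }
        }
    }

The first bind in the sketch is also the usual way flaky port collisions are avoided in test servers: request port 0, let the OS choose a free ephemeral port, and read it back with getLocalPort(); whether the Jersey test framework used here exposes such an option is not visible from this log.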


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfo

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAM

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidAccept

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodesXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodes

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)
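
Because the container is bound inside the test constructor, every construction of a TestAMWebServices instance (one per test method under JUnit 4) starts a fresh embedded server, which is why a single stuck port takes down the whole class method by method before the failures move on to TestAMWebServicesAttempt, TestAMWebServicesJobs and TestAMWebServicesTasks below. A generic pattern that sidesteps this, sketched here with JUnit 4 and the JDK's built-in HttpServer (hypothetical class, not a drop-in change for these JerseyTest-based classes, whose container lifecycle is managed by the framework), is to start one shared server per class on an OS-assigned port:

    import com.sun.net.httpserver.HttpServer;
    import java.net.InetSocketAddress;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;
    import static org.junit.Assert.assertTrue;

    public class SharedServerSketchTest {
        private static HttpServer server;
        private static int port;

        @BeforeClass
        public static void startSharedServer() throws Exception {
            // Port 0 asks the OS for any free ephemeral port, so the bind cannot collide.
            server = HttpServer.create(new InetSocketAddress(0), 0);
            server.start();
            port = server.getAddress().getPort();
        }

        @AfterClass
        public static void stopSharedServer() {
            server.stop(0); // stop immediately, releasing the port for other suites
        }

        @Test
        public void serverGotAnEphemeralPort() {
            assertTrue("expected an OS-assigned port", port > 0);
        }
    }

Each test method then talks to the port recorded in @BeforeClass instead of racing to bind its own.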


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdBogus

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testJobTaskCountersXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryReduce

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasks

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryMap

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid3

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCountersSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCountersDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskId

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)



Hadoop-Mapreduce-trunk - Build # 2522 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2522/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 34001 lines...]
  TestUberAM>TestMRJobs.testSleepJobWithRemoteJar:196->TestMRJobs.testSleepJobInternal:222 » NoClassDefFound
  TestUberAM>TestMRJobs.testConfVerificationWithClassloaderCustomClasses:253->TestMRJobs.testConfVerification:302 » NoClassDefFound
  TestUberAM>TestMRJobs.testConfVerificationWithJobClientLocal:268->TestMRJobs.testConfVerification:292 » NoClassDefFound
  TestUberAM>TestMRJobs.tearDown:175 » NoClassDefFound org/apache/hadoop/service...
  TestMRJobsWithProfiler.setup:85 » NoClassDefFound org/apache/hadoop/yarn/proto...
  TestMRAMWithNonNormalizedCapabilities.setup:72 » YarnRuntime java.lang.NoClass...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMiniMRClasspath.testClassPath:175 » YarnRuntime java.lang.NoClassDefFoundE...
  TestMiniMRClasspath.testExternalWritable:204 » IllegalState
  TestJobName>ClusterMapReduceTestCase.setUp:56->ClusterMapReduceTestCase.startCluster:86 » YarnRuntime
  TestJobName>ClusterMapReduceTestCase.setUp:56->ClusterMapReduceTestCase.startCluster:81 » FileNotFound
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestYARNRunner.testResourceMgrDelegate:266 » YarnRuntime Failed to load class:...
  TestYARNRunner.testGetHSDelegationToken:333 NoClassDefFound org/apache/hadoop/...

Tests run: 520, Failures: 5, Errors: 15, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.847 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.390 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.169 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:28 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:39 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:25 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:43 h
[INFO] Finished at: 2015-10-24T02:34:25+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter7828323534123002395.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire5899593184469780603tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2678814847105026064047tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9290
Updating HDFS-9301
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
20 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexNameWithRegex

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/Hdfs$2

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/fs/Hdfs$2
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:226)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.NoClassDefFoundError: org/apache/hadoop/fs/Hdfs$2
	at org.apache.hadoop.fs.Hdfs.listStatusIterator(Hdfs.java:180)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.fs.Hdfs$2
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.Hdfs.listStatusIterator(Hdfs.java:180)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexName

Error Message:
webapps/hdfs not found in CLASSPATH

Stack Trace:
java.io.FileNotFoundException: webapps/hdfs not found in CLASSPATH
	at org.apache.hadoop.http.HttpServer2.getWebAppsPath(HttpServer2.java:758)
	at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:334)
	at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:114)
	at org.apache.hadoop.http.HttpServer2$Builder.build(HttpServer2.java:290)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.start(NameNodeHttpServer.java:126)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:771)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:625)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:833)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:812)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1505)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNode(MiniDFSCluster.java:1247)
	at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1016)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:888)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:820)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:438)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:81)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos$GetListingRequestProto$Builder

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos$GetListingRequestProto$Builder
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetListingRequestProto.newBuilder(ClientNamenodeProtocolProtos.java:29094)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getListing(ClientNamenodeProtocolTranslatorPB.java:542)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy20.getListing(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1575)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:211)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:198)
	at org.apache.hadoop.fs.Hdfs$2.<init>(Hdfs.java:180)
	at org.apache.hadoop.fs.Hdfs.listStatusIterator(Hdfs.java:180)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath(TestMiniMRClasspath.java:175)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable

Error Message:
null

Stack Trace:
java.lang.IllegalStateException: null
	at com.google.common.base.Preconditions.checkState(Preconditions.java:129)
	at org.apache.hadoop.ipc.Client.setCallIdAndRetryCount(Client.java:123)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:100)
	at com.sun.proxy.$Proxy20.getDatanodeReport(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.datanodeReport(DFSClient.java:2025)
	at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2424)
	at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2467)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1632)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:841)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:438)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable(TestMiniMRClasspath.java:204)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testMultipleSpills

Error Message:
org/apache/hadoop/yarn/server/api/AuxiliaryService

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/api/AuxiliaryService
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:172)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
org/apache/hadoop/mapred/ShuffleHandler

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ShuffleHandler
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:172)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestYARNRunner.testResourceMgrDelegate

Error Message:
Failed to load class: [org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl]

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to load class: [org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl]
	at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:2165)
	at org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl.newRecordInstance(RecordFactoryPBImpl.java:56)
	at org.apache.hadoop.yarn.util.Records.newRecord(Records.java:36)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.killApplication(YarnClientImpl.java:399)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.killApplication(ResourceMgrDelegate.java:305)
	at org.apache.hadoop.mapred.TestYARNRunner.testResourceMgrDelegate(TestYARNRunner.java:266)


FAILED:  org.apache.hadoop.mapred.TestYARNRunner.testGetHSDelegationToken

Error Message:
org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.TestYARNRunner.testGetHSDelegationToken(TestYARNRunner.java:333)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.testJobWithNonNormalizedCapabilities

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStart(MiniYARNCluster.java:566)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.setup(TestMRAMWithNonNormalizedCapabilities.java:72)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler

Error Message:
org/apache/hadoop/yarn/proto/YarnServiceProtos$SchedulerResourceTypes

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/proto/YarnServiceProtos$SchedulerResourceTypes
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.Class.forName0(Native Method)
	at java.lang.Class.forName(Class.java:190)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createScheduler(ResourceManager.java:297)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:491)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:979)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:254)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.setup(TestMRJobsWithProfiler.java:85)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJobWithRemoteJar

Error Message:
org/apache/hadoop/fs/viewfs/ConfigUtil

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/viewfs/ConfigUtil
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobInternal(TestMRJobs.java:222)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobWithRemoteJar(TestMRJobs.java:196)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.fs.viewfs.ConfigUtil
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobInternal(TestMRJobs.java:222)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobWithRemoteJar(TestMRJobs.java:196)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithClassloaderCustomClasses

Error Message:
org/apache/hadoop/util/ApplicationClassLoader$1

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ApplicationClassLoader$1
	at org.apache.hadoop.util.ApplicationClassLoader.<clinit>(ApplicationClassLoader.java:61)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:302)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithClassloaderCustomClasses(TestMRJobs.java:253)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.util.ApplicationClassLoader$1
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.util.ApplicationClassLoader.<clinit>(ApplicationClassLoader.java:61)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:302)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithClassloaderCustomClasses(TestMRJobs.java:253)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithOutClassloader

Error Message:
Job status: Application application_1445648989520_0008 failed 2 times due to AM Container for appattempt_1445648989520_0008_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0008 Then click on links to logs of each attempt.
. Failing the application.

Stack Trace:
java.lang.AssertionError: Job status: Application application_1445648989520_0008 failed 2 times due to AM Container for appattempt_1445648989520_0008_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0008 Then click on links to logs of each attempt.
. Failing the application.
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithOutClassloader(TestMRJobs.java:258)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
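
This entry and the testConfVerificationWithClassloader / testConfVerificationWithJobClient entries below share the diagnostic "fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file", which AbstractFileSystem raises when that configuration key resolves to null inside the AM container. A minimal sketch of checking and restoring the default mapping, assuming hadoop-common on the classpath (FileSchemeCheck is a hypothetical class name; core-default.xml normally maps the file scheme to org.apache.hadoop.fs.local.LocalFs):

import org.apache.hadoop.conf.Configuration;

public class FileSchemeCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Prints the AbstractFileSystem implementation mapped to file://; a null
    // here reproduces the condition reported in the AM diagnostics above.
    System.out.println(conf.get("fs.AbstractFileSystem.file.impl"));
    // Restore the stock mapping only if it is missing (illustrative only; it
    // does not address the underlying classpath problem in this build).
    conf.setIfUnset("fs.AbstractFileSystem.file.impl",
        "org.apache.hadoop.fs.local.LocalFs");
  }
}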


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClientLocal

Error Message:
org/apache/hadoop/metrics/Updater

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/metrics/Updater
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalJobRunner.<init>(LocalJobRunner.java:769)
	at org.apache.hadoop.mapred.LocalJobRunner.<init>(LocalJobRunner.java:762)
	at org.apache.hadoop.mapred.LocalClientProtocolProvider.create(LocalClientProtocolProvider.java:42)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClientLocal(TestMRJobs.java:268)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.metrics.Updater
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalJobRunner.<init>(LocalJobRunner.java:769)
	at org.apache.hadoop.mapred.LocalJobRunner.<init>(LocalJobRunner.java:762)
	at org.apache.hadoop.mapred.LocalClientProtocolProvider.create(LocalClientProtocolProvider.java:42)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClientLocal(TestMRJobs.java:268)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithClassloader

Error Message:
Job status: Application application_1445648989520_0010 failed 2 times due to AM Container for appattempt_1445648989520_0010_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0010 Then click on links to logs of each attempt.
. Failing the application.

Stack Trace:
java.lang.AssertionError: Job status: Application application_1445648989520_0010 failed 2 times due to AM Container for appattempt_1445648989520_0010_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0010 Then click on links to logs of each attempt.
. Failing the application.
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithClassloader(TestMRJobs.java:247)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.org.apache.hadoop.mapreduce.v2.TestUberAM

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.tearDown(TestMRJobs.java:175)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.service.ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.tearDown(TestMRJobs.java:175)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
Job status: Application application_1445648989520_0007 failed 2 times due to AM Container for appattempt_1445648989520_0007_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0007 Then click on links to logs of each attempt.
. Failing the application.

Stack Trace:
java.lang.AssertionError: Job status: Application application_1445648989520_0007 failed 2 times due to AM Container for appattempt_1445648989520_0007_000002 exited with  exitCode: -1
Failing this attempt.Diagnostics: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
For more detailed output, check the application tracking page: http://asf902.gq1.ygridcore.net:52473/cluster/app/application_1445648989520_0007 Then click on links to logs of each attempt.
. Failing the application.
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
expected:<FINISHED> but was:<FAILED>

Stack Trace:
java.lang.AssertionError: expected:<FINISHED> but was:<FAILED>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:669)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf902.gq1.ygridcore.net:0 is different from new address: asf902.gq1.ygridcore.net:60748 expected:<...2.gq1.ygridcore.net:[0]> but was:<...2.gq1.ygridcore.net:[60748]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf902.gq1.ygridcore.net:0 is different from new address: asf902.gq1.ygridcore.net:60748 expected:<...2.gq1.ygridcore.net:[0]> but was:<...2.gq1.ygridcore.net:[60748]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
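
Both the testRestart comparison above and the repeated "Call From ... to ...:0 failed on connection exception" entries involve an address whose port is still 0, i.e. one captured from configuration before the server had bound its ephemeral port; RPC attempts against such an address are typically refused, as in the stack traces above. A small illustrative check, assuming hadoop-common on the classpath (PlaceholderAddressCheck is a hypothetical name):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class PlaceholderAddressCheck {
  public static void main(String[] args) {
    // Port 0 in a client-side address means "not yet assigned"; connecting to
    // it cannot reach the intended service and generally fails with
    // ConnectException, matching the failures recorded in this report.
    InetSocketAddress addr = NetUtils.createSocketAddr("asf902.gq1.ygridcore.net:0");
    System.out.println(addr.getPort() == 0
        ? "placeholder address, server port not yet known"
        : "resolved address: " + addr);
  }
}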



Hadoop-Mapreduce-trunk - Build # 2521 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2521/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33737 lines...]
Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf903.gq1.ygridcore.net:0 is different from new address: asf903.gq1.ygridcore.net:57355 expected:<...3.gq1.ygridcore.net:[0]> but was:<...3.gq1.ygridcore.net:[57355]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:926->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...

Tests run: 523, Failures: 1, Errors: 5, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.807 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:46 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.235 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.849 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:35 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:41 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:02 h
[INFO] Finished at: 2015-10-23T23:38:32+00:00
[INFO] Final Memory: 36M/729M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-8808
Updating YARN-4041
Updating HDFS-9264
Updating HDFS-9184
Updating MAPREDUCE-6508
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:926)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:926)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:926)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf903.gq1.ygridcore.net:0 is different from new address: asf903.gq1.ygridcore.net:57355 expected:<...3.gq1.ygridcore.net:[0]> but was:<...3.gq1.ygridcore.net:[57355]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf903.gq1.ygridcore.net:0 is different from new address: asf903.gq1.ygridcore.net:57355 expected:<...3.gq1.ygridcore.net:[0]> but was:<...3.gq1.ygridcore.net:[57355]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf903.gq1.ygridcore.net/67.195.81.147 to asf903.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
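
The "Call From ... to asf903.gq1.ygridcore.net:0" pattern above means the client is dialing destination port 0, i.e. it never learned the history server's real bound port (or the server never started), so the connection is refused immediately. A small illustration with plain java.net sockets rather than Hadoop's IPC client; the host name and timeout are illustrative only:

import java.net.InetSocketAddress;
import java.net.Socket;

public class ConnectToPortZero {
    public static void main(String[] args) {
        // Port 0 only means "pick any free port" when *binding*; as a
        // *destination* it is not a real service port, so the connect
        // attempt fails straight away, as in the report above.
        try (Socket s = new Socket()) {
            s.connect(new InetSocketAddress("localhost", 0), 1000);
        } catch (Exception e) {
            System.err.println("Connect to port 0 rejected: " + e);
        }
    }
}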



Hadoop-Mapreduce-trunk - Build # 2520 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2520/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33548 lines...]
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.828 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:54037 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[54037]>

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRAMWithNonNormalizedCapabilities.setup:76 » NoClassDefFound org/apache/ha...
  TestMRAMWithNonNormalizedCapabilities.tearDown:118 » NoClassDefFound org/apach...
  TestUberAM.setup:45->TestMRJobs.setup:162 » NoClassDefFound org/apache/hadoop/...
  TestMRAppWithCombiner.setup:70 NoClassDefFound org/apache/hadoop/yarn/server/r...
  TestMRJobs.setup:150 NoClassDefFound org/apache/hadoop/yarn/server/MiniYARNClu...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 499, Failures: 1, Errors: 8, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.284 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:56 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.561 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.670 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:03 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:56 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:01 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:19 h
[INFO] Finished at: 2015-10-23T19:23:01+00:00
[INFO] Final Memory: 39M/748M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4009
Updating YARN-2913
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:54037 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[54037]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:54037 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[54037]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
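
The failure above is the ResourceManager refusing to mint a delegation token because the mini cluster runs with simple authentication rather than Kerberos. Client code normally only requests delegation tokens when security is actually enabled; a hedged sketch of that guard (the renewer name is purely illustrative):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenGuard {
    // Only ask the RM for a delegation token when Kerberos security is on;
    // with simple auth the server answers "Delegation Token can be issued
    // only with kerberos authentication", as seen in the stack trace above.
    static Token<?> maybeGetDelegationToken(JobClient client) throws Exception {
        if (!UserGroupInformation.isSecurityEnabled()) {
            return null; // nothing to fetch against an insecure (test) cluster
        }
        return client.getDelegationToken(new Text("illustrative-renewer"));
    }
}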


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.testJobWithNonNormalizedCapabilities

Error Message:
org/apache/hadoop/fs/ChecksumException

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/ChecksumException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.ChecksumFileSystem.open(ChecksumFileSystem.java:341)
	at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:771)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:367)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:341)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:292)
	at org.apache.hadoop.fs.LocalFileSystem.copyFromLocalFile(LocalFileSystem.java:82)
	at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1910)
	at org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.setup(TestMRAMWithNonNormalizedCapabilities.java:76)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.testJobWithNonNormalizedCapabilities

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.tearDown(TestMRAMWithNonNormalizedCapabilities.java:118)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/ResourceManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/ResourceManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.setup(TestMRAppWithCombiner.java:70)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.org.apache.hadoop.mapreduce.v2.TestMRJobs

Error Message:
org/apache/hadoop/yarn/server/MiniYARNCluster$CustomNodeManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$CustomNodeManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.setup(TestMRJobs.java:150)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.org.apache.hadoop.mapreduce.v2.TestUberAM

Error Message:
org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMApplicationHistoryWriter(ResourceManager.java:362)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.setup(TestMRJobs.java:162)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.setup(TestUberAM.java:45)
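
The NoClassDefFoundError failures in this build (ChecksumException, ServiceOperations, ResourceManager, MiniYARNCluster$CustomNodeManager, ApplicationHistoryWriter) all point at classes that were available at compile time but missing from the forked test JVM's runtime classpath, which usually indicates a stale or partially rebuilt workspace rather than a product bug. If such environment problems recur, one option is to turn the hard error into a skipped suite with a JUnit assumption; this is only a sketch, with the probed class chosen as an example, and it relies on assumption failures in @BeforeClass being reported as skips by the JUnit version in use:

import org.junit.Assume;
import org.junit.BeforeClass;

public class ClasspathGuardExample {
    // Hypothetical guard: if a required server-side class cannot be loaded,
    // raise an assumption failure so the suite is skipped instead of every
    // test erroring out with NoClassDefFoundError as in the report above.
    @BeforeClass
    public static void requireMiniYarnClasses() {
        try {
            Class.forName("org.apache.hadoop.yarn.server.MiniYARNCluster");
        } catch (ClassNotFoundException e) {
            Assume.assumeNoException(e);
        }
    }
}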



Hadoop-Mapreduce-trunk - Build # 2519 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2519/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 34200 lines...]
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServices.<init>:104->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer

Tests run: 339, Failures: 0, Errors: 59, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.912 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:14 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 37.153 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.356 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:54 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:58 min
[INFO] Finished at: 2015-10-23T16:20:02+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-7266
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
59 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)
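
Every TestAMWebServices* error in this build shares the same root cause: the embedded Grizzly test container cannot bind its HTTP port because the address is already in use on the Jenkins slave, so each test constructor fails before the web service is ever exercised. A common mitigation is to probe for a free ephemeral port first and hand that number to the test container; the sketch below shows only the hypothetical port-picking helper, independent of the Jersey test framework's own configuration hooks:

import java.io.IOException;
import java.net.ServerSocket;

public class FreePortFinder {
    // Bind to port 0 so the OS chooses an unused port, then close the probe
    // socket and return the number for the test HTTP server to reuse. There
    // is a small race between closing the probe and the real bind, but it
    // avoids the hard-coded-port collisions behind the BindExceptions above.
    static int findFreePort() throws IOException {
        try (ServerSocket probe = new ServerSocket(0)) {
            probe.setReuseAddress(true);
            return probe.getLocalPort();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println("picked free port " + findFreePort());
    }
}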


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfo

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAM

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidAccept

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodesXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testBlacklistedNodes

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInvalidUri

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testInfoSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.testAMSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices.<init>(TestAMWebServices.java:104)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConf

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobId

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobs

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidBogus

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttempts

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.<init>(TestAMWebServicesJobs.java:116)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdBogus

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.<init>(TestAMWebServicesTasks.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdSlash
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testJobTaskCountersXML
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryReduce
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasks
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryMap
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid2
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid3
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCountersSlash
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdDefault
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdNonExist
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksXML
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdInvalid
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdXML
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskIdCountersDefault
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksQueryInvalid
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksDefault
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTasksSlash
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks.testTaskId

Error Message (each of the 18 tests above):
java.net.BindException: Address already in use

Stack Trace (each of the 18 tests above): identical, byte for byte, to the
com.sun.jersey.test.framework.spi.container.TestContainerException trace shown above for
testTaskIdBogus; the Grizzly web test container fails to bind its HTTP port in
TestAMWebServicesTasks.<init> (TestAMWebServicesTasks.java:111).
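
All of the TestAMWebServicesTasks failures above share one cause: the Jersey/Grizzly test container tries to bind an HTTP port that something on the build slave (most likely a container from an earlier test that was not torn down, or another concurrently running job) still holds. The sketch below is only a minimal, self-contained illustration of the difference between binding a fixed port and asking the kernel for an ephemeral one; it uses plain java.net.ServerSocket rather than the Grizzly API from the traces, and the class name is made up.

  import java.io.IOException;
  import java.net.BindException;
  import java.net.ServerSocket;

  public class BindCollisionSketch {
      public static void main(String[] args) throws IOException {
          // Two listeners on the same fixed port: the second bind fails with
          // the same "Address already in use" seen in the traces above.
          try (ServerSocket first = new ServerSocket(9998);
               ServerSocket second = new ServerSocket(9998)) {
              // never reached
          } catch (BindException e) {
              System.out.println("fixed-port collision: " + e.getMessage());
          }

          // Port 0 lets the kernel pick a free ephemeral port, so concurrent
          // test JVMs on a shared Jenkins slave cannot collide.
          try (ServerSocket a = new ServerSocket(0);
               ServerSocket b = new ServerSocket(0)) {
              System.out.println("ephemeral ports: " + a.getLocalPort() + ", " + b.getLocalPort());
          }
      }
  }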



Hadoop-Mapreduce-trunk - Build # 2518 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2518/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33441 lines...]
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.061 sec - in org.apache.hadoop.mapred.TestMiniMRDFSCaching
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.638 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.973 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf901.gq1.ygridcore.net:0 is different from new address: asf901.gq1.ygridcore.net:58870 expected:<...1.gq1.ygridcore.net:[]0> but was:<...1.gq1.ygridcore.net:[5887]0>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 1, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.176 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:54 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.502 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.670 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:58 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:55 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:04 h
[INFO] Finished at: 2015-10-23T03:40:04+00:00
[INFO] Final Memory: 34M/693M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-9692
Updating HDFS-9286
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf901.gq1.ygridcore.net:0 is different from new address: asf901.gq1.ygridcore.net:58870 expected:<...1.gq1.ygridcore.net:[]0> but was:<...1.gq1.ygridcore.net:[5887]0>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf901.gq1.ygridcore.net:0 is different from new address: asf901.gq1.ygridcore.net:58870 expected:<...1.gq1.ygridcore.net:[]0> but was:<...1.gq1.ygridcore.net:[5887]0>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
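
The TestNetworkedJob error is the ResourceManager refusing to issue a delegation token because the mini cluster runs with the default "simple" authentication; ClientRMService only hands out RM delegation tokens to Kerberos-authenticated callers. The sketch below shows the client-side precondition under that assumption; the guard method is made up for illustration and is not part of the Hadoop API.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;

  public class DelegationTokenPreconditionSketch {
      // Hypothetical guard mirroring the condition the server enforces before
      // issuing an RM delegation token.
      static void checkKerberosAuth(Configuration conf) throws IOException {
          String auth = conf.get("hadoop.security.authentication", "simple");
          if (!"kerberos".equalsIgnoreCase(auth)) {
              throw new IOException(
                  "Delegation Token can be issued only with kerberos authentication");
          }
      }

      public static void main(String[] args) throws IOException {
          Configuration conf = new Configuration();
          // A secure test setup would set this (plus the usual UGI/keytab wiring):
          // conf.set("hadoop.security.authentication", "kerberos");
          // org.apache.hadoop.security.UserGroupInformation.setConfiguration(conf);
          checkKerberosAuth(conf);  // throws under the default "simple" auth
      }
  }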


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf901.gq1.ygridcore.net/67.195.81.145 to asf901.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf901.gq1.ygridcore.net/67.195.81.145 to asf901.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
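
The ConnectException target here is asf901.gq1.ygridcore.net:0; port 0 is never a connectable service port, which usually means the client was still using the configured placeholder for the job history server rather than the port the mini cluster actually bound. A small sanity-check sketch under that assumption (the key mapreduce.jobhistory.address and NetUtils.createSocketAddr are real; everything else is illustrative):

  import java.net.InetSocketAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.net.NetUtils;

  public class HistoryAddressSanityCheck {
      public static void main(String[] args) {
          Configuration conf = new Configuration();
          // In the failing run the effective value still pointed at port 0.
          String target = conf.get("mapreduce.jobhistory.address", "0.0.0.0:10020");
          InetSocketAddress addr = NetUtils.createSocketAddr(target);
          if (addr.getPort() == 0) {
              // Connecting to port 0 can only produce a ConnectException; the
              // client needs the port the history server really bound.
              throw new IllegalStateException("history server address not resolved: " + target);
          }
          System.out.println("would connect to " + addr);
      }
  }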



Hadoop-Mapreduce-trunk - Build # 2517 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2517/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33499 lines...]
Results :

Failed tests: 
  TestJobCleanup.testCustomCleanup:316->testFailedJob:202 File /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-1/_custom_cleanup missing for failed job job_1445554953381_0002
  TestJobCleanup.testDefaultCleanupAndAbort:268->testSuccessfulJob:171 Done file "/home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-2/_SUCCESS" missing for job job_1445554953381_0003
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:47565 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[47565]>

Tests in error: 
  TestJobCleanup.tearDown:91 » YarnRuntime java.lang.reflect.InvocationTargetExc...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 524, Failures: 3, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.712 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.677 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.152 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:30 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:42 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-10-23T00:24:57+00:00
[INFO] Final Memory: 35M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9280
Updating HADOOP-12484
Updating HADOOP-12334
Updating YARN-4243
Updating YARN-4256
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobCleanup.testCustomCleanup

Error Message:
File /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-1/_custom_cleanup missing for failed job job_1445554953381_0002

Stack Trace:
java.lang.AssertionError: File /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-1/_custom_cleanup missing for failed job job_1445554953381_0002
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapred.TestJobCleanup.testFailedJob(TestJobCleanup.java:202)
	at org.apache.hadoop.mapred.TestJobCleanup.testCustomCleanup(TestJobCleanup.java:316)


FAILED:  org.apache.hadoop.mapred.TestJobCleanup.testDefaultCleanupAndAbort

Error Message:
Done file "/home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-2/_SUCCESS" missing for job job_1445554953381_0003

Stack Trace:
java.lang.AssertionError: Done file "/home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-dir/test-job-cleanup/output-2/_SUCCESS" missing for job job_1445554953381_0003
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapred.TestJobCleanup.testSuccessfulJob(TestJobCleanup.java:171)
	at org.apache.hadoop.mapred.TestJobCleanup.testDefaultCleanupAndAbort(TestJobCleanup.java:268)
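
Both TestJobCleanup assertions above are about job-level marker files in the output directory: the stock FileOutputCommitter writes _SUCCESS from commitJob (while mapreduce.fileoutputcommitter.marksuccessfuljobs stays enabled), and the custom-cleanup case expects a committer to leave its own marker when the job is aborted. The sketch below shows the general mechanism only; the class and the _custom_cleanup file name mirror the assertion messages, but this is not the committer the test actually registers.

  import java.io.IOException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.JobContext;
  import org.apache.hadoop.mapreduce.JobStatus;
  import org.apache.hadoop.mapreduce.TaskAttemptContext;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

  public class MarkerFileCommitter extends FileOutputCommitter {
      private final Path outputPath;

      public MarkerFileCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
          super(outputPath, context);  // inherited commitJob() writes _SUCCESS
          this.outputPath = outputPath;
      }

      @Override
      public void abortJob(JobContext context, JobStatus.State state) throws IOException {
          super.abortJob(context, state);
          // Leave a marker so a test (or an operator) can see that cleanup ran
          // for a failed or killed job.
          FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
          fs.create(new Path(outputPath, "_custom_cleanup")).close();
      }
  }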


FAILED:  org.apache.hadoop.mapred.TestJobCleanup.org.apache.hadoop.mapred.TestJobCleanup

Error Message:
java.lang.reflect.InvocationTargetException

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.reflect.InvocationTargetException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos$UnRegisterNodeManagerRequestProto.<clinit>(YarnServerCommonServiceProtos.java:4118)
	at org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl.<init>(UnRegisterNodeManagerRequestPBImpl.java:33)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl.newRecordInstance(RecordFactoryPBImpl.java:70)
	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.unRegisterNM(NodeStatusUpdaterImpl.java:263)
	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.serviceStop(NodeStatusUpdaterImpl.java:245)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceStop(NodeManager.java:373)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStop(MiniYARNCluster.java:590)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.TestJobCleanup.tearDown(TestJobCleanup.java:91)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:47565 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[47565]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:47565 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[47565]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)
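
For context on why this comparison fails: a configured port of 0 asks the operating system for an ephemeral port, so an address snapshotted before the restart ("...:0") can never equal the port the service actually rebinds to afterwards. A minimal, self-contained Java sketch (hypothetical demo class, not taken from the Hadoop test) showing that the real port is only known after bind():

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortDemo {
  public static void main(String[] args) throws Exception {
    try (ServerSocket ss = new ServerSocket()) {
      ss.bind(new InetSocketAddress("localhost", 0)); // port 0 = "pick any free port"
      // Anything recorded before bind() still says 0; the effective port is only
      // available here, and it changes on every new bind.
      System.out.println("configured port: 0, bound port: " + ss.getLocalPort());
    }
  }
}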


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2287)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2287)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:542)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
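
For context: the ResourceManager only issues RM delegation tokens when Hadoop security is configured for kerberos, so with the default "simple" authentication the getDelegationToken() call above is rejected on the server side. A minimal sketch (hypothetical class name; the config key and UserGroupInformation calls are standard Hadoop APIs) for checking which authentication mode is in effect:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class AuthModeCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The default is "simple"; kerberos would be enabled with something like:
    // conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    System.out.println("kerberos security enabled: "
        + UserGroupInformation.isSecurityEnabled());
  }
}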


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
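
For context: the target address in this exception ends in ":0", i.e. the client is still holding an unresolved default address for the history server, so every connect attempt is refused. A stand-alone sketch (hypothetical class; assumes the chosen port has no listener) that produces the same ConnectException shape:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class ConnectProbe {
  public static void main(String[] args) {
    try (Socket s = new Socket()) {
      // Assumed-free port: nothing is expected to listen here, so the connect
      // fails much like the RPC client above does against host:0.
      s.connect(new InetSocketAddress("localhost", 59999), 1000);
      System.out.println("connected");
    } catch (IOException e) {
      System.out.println("connect failed: " + e);
    }
  }
}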



Hadoop-Mapreduce-trunk - Build # 2516 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2516/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33964 lines...]

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45055 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45055]>

Tests in error: 
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestReduceFetchFromPartialMem$1>TestSetup.run:27->setUp:61 » YarnRuntime java....
  TestLazyOutput.testLazyOutput:147 » NoClassDefFound org/apache/hadoop/yarn/eve...

Tests run: 518, Failures: 1, Errors: 11, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.871 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.121 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.903 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:27 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:08 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:48 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:06 h
[INFO] Finished at: 2015-10-22T20:51:55+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter8639862971787429734.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire3518520682463758828tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2836232141511579975068tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12436
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
12 tests failed.
FAILED:  org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/event/EventHandler

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/event/EventHandler
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:58)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput(TestLazyOutput.java:147)
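
For context: a NoClassDefFoundError for org/apache/hadoop/yarn/event/EventHandler while MiniMRClientClusterFactory loads classes suggests the yarn-common classes were not visible on the forked test classpath. A minimal probe (hypothetical class, not part of the test suite) that fails the same way when the class is absent:

public class ClasspathProbe {
  public static void main(String[] args) {
    try {
      // Loads the same class the test could not resolve; succeeds only if
      // hadoop-yarn-common is on the classpath.
      Class.forName("org.apache.hadoop.yarn.event.EventHandler");
      System.out.println("EventHandler found");
    } catch (ClassNotFoundException e) {
      System.out.println("EventHandler missing: " + e);
    }
  }
}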


FAILED:  org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.org.apache.hadoop.mapred.TestReduceFetchFromPartialMem

Error Message:
java.io.IOException: NodeManager 0 failed to start

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.io.IOException: NodeManager 0 failed to start
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStart(MiniYARNCluster.java:579)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.setUp(TestReduceFetchFromPartialMem.java:61)
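
For context: this YarnRuntimeException is raised while MiniMRClientClusterFactory is still starting the embedded NodeManagers, i.e. before the test body runs, and the half-started cluster then has to be torn down. A minimal lifecycle sketch (hypothetical class; method signatures may differ slightly across Hadoop versions) of creating and always stopping such a mini cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // create() initializes and starts the mini YARN cluster; failures like the
    // one above surface from inside this call.
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(MiniClusterLifecycle.class, 1, conf);
    try {
      System.out.println("framework: "
          + cluster.getConfig().get("mapreduce.framework.name"));
    } finally {
      cluster.stop(); // stop even on failure so later tests start from a clean slate
    }
  }
}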


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45055 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45055]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:45055 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[45055]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
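
For context on this failure mode: the ResourceManager refuses to issue a delegation token unless the caller authenticated via Kerberos, and the mini cluster used by this test runs with simple authentication. A minimal, hypothetical client-side guard (illustrative only, not the test's code) that skips the request when security is off, using only public Hadoop APIs, might look like this:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    // Hypothetical illustration only; not part of TestNetworkedJob.
    public class DelegationTokenGuard {
      public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());
        // ClientRMService only issues delegation tokens over Kerberos,
        // so skip the request when security is not enabled.
        if (UserGroupInformation.isSecurityEnabled()) {
          Token<?> token = client.getDelegationToken(new Text("renewer"));
          System.out.println("Got RM delegation token: " + token.getKind());
        } else {
          System.out.println("Simple auth in use; no delegation token requested.");
        }
      }
    }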


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2515 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2515/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36645 lines...]
Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:51655 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[51655]>

Tests in error: 
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:348 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestMRTimelineEventHandling.testMRTimelineEventHandling:99 » IO Job didn't fin...
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 522, Failures: 1, Errors: 12, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.733 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:08 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.736 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.558 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:59 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:21 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:42 h
[INFO] Finished at: 2015-10-22T17:51:24+00:00
[INFO] Final Memory: 40M/1058M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-3739
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
13 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:99)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)
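
The two timeline-service failures above are plain timeouts from a bounded wait on job completion. A rough sketch of that kind of wait loop, written against the public Job API rather than the actual UtilsForTests code (the class and method names below are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.mapreduce.Job;

    // Illustrative helper, not the actual UtilsForTests implementation.
    public final class JobWait {
      private JobWait() {}

      /** Poll a submitted job until it finishes or the timeout elapses. */
      public static void awaitCompletion(Job job, long timeoutMillis)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!job.isComplete()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Job didn't finish in " + timeoutMillis + " ms");
          }
          Thread.sleep(1000);
        }
      }
    }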


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapred.JobClient$NetworkedJob.waitForCompletion(JobClient.java:287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:348)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:51655 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[51655]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf907.gq1.ygridcore.net:0 is different from new address: asf907.gq1.ygridcore.net:51655 expected:<...7.gq1.ygridcore.net:[0]> but was:<...7.gq1.ygridcore.net:[51655]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy78.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy81.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf907.gq1.ygridcore.net/67.195.81.151 to asf907.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)
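
Many of the errors in this report are connection attempts to asf907.gq1.ygridcore.net:0, i.e. the client never learned the server's real bound port. A small stand-alone check (illustrative only, reusing the NetUtils helper that appears in the traces) that flags such an address before dialing:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    // Illustrative diagnostic; not part of the failing tests.
    public class AddressCheck {
      public static void main(String[] args) {
        // Port 0 in the target address usually means the real bound port
        // was never propagated into the client configuration.
        InetSocketAddress addr =
            NetUtils.createSocketAddr("asf907.gq1.ygridcore.net:0");
        if (addr.getPort() == 0) {
          System.err.println("Refusing to dial " + addr
              + ": no real port; the server address was never resolved.");
        }
      }
    }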



Hadoop-Mapreduce-trunk - Build # 2514 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2514/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33454 lines...]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.742 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.402 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.463 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Failed tests: 
  TestMiniMRClientCluster.testRestart:146 Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:59871 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[59871]>

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestJobCleanup.tearDown:91 » NoClassDefFound org/apache/hadoop/service/Service...

Tests run: 524, Failures: 1, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.791 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:45 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.302 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.922 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:20 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:01 h
[INFO] Finished at: 2015-10-22T06:01:13+00:00
[INFO] Final Memory: 34M/658M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-9692
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobCleanup.org.apache.hadoop.mapred.TestJobCleanup

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.TestJobCleanup.tearDown(TestJobCleanup.java:91)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:59871 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[59871]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf904.gq1.ygridcore.net:0 is different from new address: asf904.gq1.ygridcore.net:59871 expected:<...4.gq1.ygridcore.net:[0]> but was:<...4.gq1.ygridcore.net:[59871]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2287)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2287)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf904.gq1.ygridcore.net/67.195.81.148 to asf904.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2513 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2513/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33466 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.545 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestMiniMRClientCluster.setup:80 » YarnRuntime org.apache.hadoop.yarn.exceptio...
  TestMiniMRClientCluster.cleanup:93 NullPointer

Tests run: 523, Failures: 0, Errors: 4, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.986 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.547 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.921 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:29 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:42 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:58 h
[INFO] Finished at: 2015-10-22T02:52:52+00:00
[INFO] Final Memory: 34M/608M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-10406
Updating HDFS-9225
Updating HDFS-9273
Updating HDFS-9278
Updating HDFS-9070
Updating HDFS-7087
Updating MAPREDUCE-6518
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories
	at org.apache.hadoop.fs.RawLocalFileSystem.listStatus(RawLocalFileSystem.java:460)
	at org.apache.hadoop.fs.DelegateToFileSystem.listStatus(DelegateToFileSystem.java:168)
	at org.apache.hadoop.fs.ChecksumFs.listStatus(ChecksumFs.java:521)
	at org.apache.hadoop.fs.AbstractFileSystem$1.<init>(AbstractFileSystem.java:890)
	at org.apache.hadoop.fs.AbstractFileSystem.listStatusIterator(AbstractFileSystem.java:888)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.setup(TestMiniMRClientCluster.java:80)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.cleanup(TestMiniMRClientCluster.java:93)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf902.gq1.ygridcore.net/67.195.81.146 to asf902.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)



Hadoop-Mapreduce-trunk - Build # 2512 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2512/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 34084 lines...]
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestMRTimelineEventHandling.testMRTimelineEventHandling:93 » YarnRuntime java....
  TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster:52 » YarnRuntime
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:147 » YarnRuntime
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestNonExistentJob.testGetInvalidJob:93 » IO java.net.ConnectException: Call F...
  TestUberAM.testFailingMapper:132 » IO java.net.ConnectException: Call From asf...
  TestUberAM>TestMRJobs.testDistributedCache:916->TestMRJobs._testDistributedCache:906 » IO
  TestUberAM>TestMRJobs.testConfVerificationWithJobClient:263->TestMRJobs.testConfVerification:353 » IO
  TestUberAM>TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectExcepti...
  TestMRJobs.testConfVerificationWithJobClient:263->testConfVerification:353 » IO
  TestMRJobs.testContainerRollingLog:694 » IO java.net.ConnectException: Call Fr...
  TestMRJobsWithHistoryService.testJobHistoryData:153 » IO java.net.ConnectExcep...

Tests run: 523, Failures: 1, Errors: 24, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.480 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:53 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.939 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.227 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:29 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:19 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:43 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-22T00:14:19+00:00
[INFO] Final Memory: 35M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9274
Updating YARN-2513
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
25 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter(TestJobCounters.java:716)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testNewCounterA

Error Message:
org/apache/hadoop/io/IntWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/IntWritable
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.TestJobCounters.createJob(TestJobCounters.java:239)
	at org.apache.hadoop.mapred.TestJobCounters.testNewCounterA(TestJobCounters.java:380)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testNewCounterB

Error Message:
org/apache/hadoop/io/IntWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/IntWritable
	at org.apache.hadoop.mapred.TestJobCounters.createJob(TestJobCounters.java:239)
	at org.apache.hadoop.mapred.TestJobCounters.testNewCounterB(TestJobCounters.java:401)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testNewCounterC

Error Message:
org/apache/hadoop/io/IntWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/IntWritable
	at org.apache.hadoop.mapred.TestJobCounters.createJob(TestJobCounters.java:239)
	at org.apache.hadoop.mapred.TestJobCounters.testNewCounterC(TestJobCounters.java:423)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testNewCounterD

Error Message:
org/apache/hadoop/io/IntWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/IntWritable
	at org.apache.hadoop.mapred.TestJobCounters.createJob(TestJobCounters.java:239)
	at org.apache.hadoop.mapred.TestJobCounters.testNewCounterD(TestJobCounters.java:446)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testOldCounterA

Error Message:
org/apache/hadoop/util/ClassUtil

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ClassUtil
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.JobConf.setJarByClass(JobConf.java:541)
	at org.apache.hadoop.mapred.JobConf.<init>(JobConf.java:437)
	at org.apache.hadoop.mapred.TestJobCounters.createConfiguration(TestJobCounters.java:219)
	at org.apache.hadoop.mapred.TestJobCounters.testOldCounterA(TestJobCounters.java:254)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testOldCounterB

Error Message:
org/apache/hadoop/util/ClassUtil

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ClassUtil
	at org.apache.hadoop.mapred.JobConf.setJarByClass(JobConf.java:541)
	at org.apache.hadoop.mapred.JobConf.<init>(JobConf.java:437)
	at org.apache.hadoop.mapred.TestJobCounters.createConfiguration(TestJobCounters.java:219)
	at org.apache.hadoop.mapred.TestJobCounters.testOldCounterB(TestJobCounters.java:296)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testOldCounterC

Error Message:
org/apache/hadoop/util/ClassUtil

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ClassUtil
	at org.apache.hadoop.mapred.JobConf.setJarByClass(JobConf.java:541)
	at org.apache.hadoop.mapred.JobConf.<init>(JobConf.java:437)
	at org.apache.hadoop.mapred.TestJobCounters.createConfiguration(TestJobCounters.java:219)
	at org.apache.hadoop.mapred.TestJobCounters.testOldCounterC(TestJobCounters.java:328)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testOldCounterD

Error Message:
org/apache/hadoop/util/ClassUtil

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/ClassUtil
	at org.apache.hadoop.mapred.JobConf.setJarByClass(JobConf.java:541)
	at org.apache.hadoop.mapred.JobConf.<init>(JobConf.java:437)
	at org.apache.hadoop.mapred.TestJobCounters.createConfiguration(TestJobCounters.java:219)
	at org.apache.hadoop.mapred.TestJobCounters.testOldCounterD(TestJobCounters.java:356)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStart(MiniYARNCluster.java:566)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:93)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStart(MiniYARNCluster.java:566)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster(TestMRTimelineEventHandling.java:52)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/MiniYARNCluster$NodeManagerWrapper$1
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStart(MiniYARNCluster.java:566)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:147)


FAILED:  org.apache.hadoop.mapred.TestMerge.testMerge

Error Message:
org/apache/hadoop/hdfs/server/namenode/JournalSet$5

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/server/namenode/JournalSet$5
	at org.apache.hadoop.hdfs.server.namenode.JournalSet.close(JournalSet.java:243)
	at org.apache.hadoop.hdfs.server.namenode.FSEditLog.close(FSEditLog.java:368)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.stopActiveServices(FSNamesystem.java:1207)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.close(FSNamesystem.java:1578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stopCommonServices(NameNode.java:723)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stop(NameNode.java:893)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1913)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1882)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1875)
	at org.apache.hadoop.mapred.TestMerge.testMerge(TestMerge.java:90)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart

Error Message:
Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:59293 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[59293]>

Stack Trace:
org.junit.ComparisonFailure: Address before restart: asf906.gq1.ygridcore.net:0 is different from new address: asf906.gq1.ygridcore.net:59293 expected:<...6.gq1.ygridcore.net:[0]> but was:<...6.gq1.ygridcore.net:[59293]>
	at org.junit.Assert.assertEquals(Assert.java:115)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.testRestart(TestMiniMRClientCluster.java:146)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testMultipleSpills

Error Message:
org/apache/hadoop/yarn/event/EventHandler

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/event/EventHandler
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:58)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
org/apache/hadoop/yarn/event/EventHandler

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/event/EventHandler
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:58)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor80.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy88.getCounters(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getCounters(MRClientProtocolPBClientImpl.java:166)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobCounters(ClientServiceDelegate.java:380)
	at org.apache.hadoop.mapred.YARNRunner.getJobCounters(YARNRunner.java:589)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:761)
	at org.apache.hadoop.mapreduce.Job$7.run(Job.java:758)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:758)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData(TestMRJobsWithHistoryService.java:153)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy93.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:184)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:593)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:591)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getJobUsingCluster(JobClient.java:591)
	at org.apache.hadoop.mapred.JobClient.getJobInner(JobClient.java:601)
	at org.apache.hadoop.mapred.JobClient.getJob(JobClient.java:631)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob(TestNonExistentJob.java:93)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getTaskAttemptCompletionEvents(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getTaskAttemptCompletionEvents(MRClientProtocolPBClientImpl.java:177)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getTaskCompletionEvents(ClientServiceDelegate.java:395)
	at org.apache.hadoop.mapred.YARNRunner.getTaskCompletionEvents(YARNRunner.java:607)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:673)
	at org.apache.hadoop.mapreduce.Job$5.run(Job.java:670)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:670)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:132)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:604)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1316)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:906)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.getStatus(Job.java:338)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:353)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused

Stack Trace:
java.io.IOException: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:343)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Call From asf906.gq1.ygridcore.net/67.195.81.150 to asf906.gq1.ygridcore.net:0 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792)
	at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
	at org.apache.hadoop.ipc.Client.call(Client.java:1447)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.net.ConnectException: Connection refused
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
	at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:633)
	at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:732)
	at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:378)
	at org.apache.hadoop.ipc.Client.getConnection(Client.java:1504)
	at org.apache.hadoop.ipc.Client.call(Client.java:1419)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy97.getJobReport(Unknown Source)
	at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
	at sun.reflect.GeneratedMethodAccessor26.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:324)
	at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:428)
	at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:600)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:323)
	at org.apache.hadoop.mapreduce.Job$1.run(Job.java:320)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:320)
	at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1479)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:694)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
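
Each of the ConnectException failures above dials asf906.gq1.ygridcore.net:0. The ConnectionRefused wiki page linked in the messages lists a destination port of 0 among the things to check, since it normally means the client read an address that was never actually configured rather than a live server endpoint. The snippet below is only a hypothetical illustration of that check and is not part of the Hadoop or test code; the class and method names are invented for the example.

    import java.net.InetSocketAddress;

    // Hypothetical helper illustrating the wiki's advice: a target port of 0
    // usually means the address was never configured, so fail fast with a
    // clearer message instead of surfacing "Connection refused".
    public class TargetAddressCheck {
        static void requireResolvedPort(InetSocketAddress addr) {
            if (addr.getPort() == 0) {
                throw new IllegalStateException("Refusing to dial " + addr
                    + ": port 0 usually means the address was never configured");
            }
        }

        public static void main(String[] args) {
            // Mirrors the failing calls above, which all target host:0.
            requireResolvedPort(
                InetSocketAddress.createUnresolved("asf906.gq1.ygridcore.net", 0));
        }
    }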



Hadoop-Mapreduce-trunk - Build # 2511 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2511/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32837 lines...]
Running org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator
Killed
Running org.apache.hadoop.mapreduce.v2.app.rm.TestRMCommunicator
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.787 sec - in org.apache.hadoop.mapreduce.v2.app.rm.TestRMCommunicator
Running org.apache.hadoop.mapreduce.v2.app.TestKill
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.859 sec - in org.apache.hadoop.mapreduce.v2.app.TestKill
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.794 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.362 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.891 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.587 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor

Results :

Tests run: 313, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.827 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:38 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.919 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.664 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [07:55 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 10:12 min
[INFO] Finished at: 2015-10-21T20:46:46+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefirebooter7916555063856945583.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire645127742497275962tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire/surefire_1037337172383740923225tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-11791
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed
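
For local follow-up on this run, the resume hint printed in the console above can be turned into a concrete command, assuming a trunk checkout and substituting the test goal for the <goals> placeholder. Since the forked VM was killed while running TestRMContainerAllocator, limiting the rerun to that suite is a reasonable first step; the -Dtest filter is only an illustration, not something taken from the console output.

    mvn test -rf :hadoop-mapreduce-client-app
    mvn test -rf :hadoop-mapreduce-client-app -Dtest=TestRMContainerAllocator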

Hadoop-Mapreduce-trunk - Build # 2510 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2510/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 35816 lines...]
  TestMRIntermediateDataEncryption.testUberMode:60->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testMultipleMapsPerNode:65->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testMultipleReducers:70->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testSingleReducer:55->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestReduceFetchFromPartialMem$1>TestSetup.run:27->setUp:61 » NoClassDefFound o...
  TestLazyOutput.testLazyOutput:147 » NoClassDefFound org/apache/hadoop/yarn/ser...
  TestJobCleanup.setUp:73 » NoClassDefFound org/apache/hadoop/yarn/server/timeli...
  TestMiniMRBringup.testBringUp:32 » NoClassDefFound org/apache/hadoop/yarn/serv...
  TestMRTimelineEventHandling.testMRTimelineEventHandling:92 » NoClassDefFound o...
  TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster:51 » NoClassDefFound
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:146 » NoClassDefFound
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestSpecialCharactersInOutputPath.testJobWithDFS:112 » NoClassDefFound org/apa...
  TestMRCJCSocketFactory.testSocketFactory:85->initAndStartMiniMRYarnCluster:111 » NoClassDefFound

Tests run: 487, Failures: 0, Errors: 55, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.722 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:43 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.803 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.902 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:14 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:35 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [36:12 min]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 53:24 min
[INFO] Finished at: 2015-10-21T18:24:56+00:00
[INFO] Final Memory: 39M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter6882723851610486958.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire7472384509500060137tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_1395402075964566682864tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-8647
Updating HADOOP-12495
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
55 tests failed.
FAILED:  org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.initAndStartMiniMRYarnCluster(TestMRCJCSocketFactory.java:111)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory(TestMRCJCSocketFactory.java:85)
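
The NoClassDefFoundError above (and the ClassNotFoundException it wraps in the entries that follow) indicates that org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager is not on the jobclient test classpath when the MiniYARNCluster ResourceManager initializes. One way to see which timeline-service artifacts the module actually pulls in, assuming a trunk checkout and running from the top-level aggregator (the grep filter is only an illustration):

    mvn -pl :hadoop-mapreduce-client-jobclient dependency:tree | grep -i timelineservice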


FAILED:  org.apache.hadoop.mapred.TestClusterMRNotification.testMR

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapred.NotificationTestCase.setUp(NotificationTestCase.java:145)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapred.NotificationTestCase.setUp(NotificationTestCase.java:145)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMapReduceRestarting

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)

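The NoClassDefFoundError failures in this run all share the same signature: MiniYARNCluster's ResourceManager cannot load the timeline service v2 collector classes (TimelineCollectorManager / RMTimelineCollectorManager), which points at a jar missing from the surefire test classpath rather than at the individual tests. As a minimal, hypothetical diagnostic (not part of the Hadoop test suite; only the two class names are copied from the error messages, everything else is illustrative), the following standalone Java snippet reports whether those classes are resolvable on the current classpath:

    // Hypothetical classpath probe for the classes named in the NoClassDefFoundErrors above/below.
    public class TimelineClasspathProbe {
      public static void main(String[] args) {
        String[] required = {
            "org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager",
            "org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager"
        };
        for (String cls : required) {
          try {
            Class.forName(cls);                        // class is resolvable on this classpath
            System.out.println("OK:      " + cls);
          } catch (ClassNotFoundException | NoClassDefFoundError e) {
            System.out.println("MISSING: " + cls);     // the jar providing it is absent
          }
        }
      }
    }

If both names print MISSING when run with the failing module's test classpath, the likely fix is a dependency or classpath change in that module's pom rather than a change to the tests themselves.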

FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMRConfig

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testDFSRestart

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMapReduce

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobCleanup.org.apache.hadoop.mapred.TestJobCleanup

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:128)
	at org.apache.hadoop.mapred.TestJobCleanup.setUp(TestJobCleanup.java:73)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter(TestJobCounters.java:665)


FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexNameWithRegex

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexName

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobSysDirWithDFS.testWithDFS

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:128)
	at org.apache.hadoop.mapred.TestJobSysDirWithDFS.testWithDFS(TestJobSysDirWithDFS.java:130)


FAILED:  org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput(TestLazyOutput.java:147)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testUberMode

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testUberMode(TestMRIntermediateDataEncryption.java:60)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleMapsPerNode

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleMapsPerNode(TestMRIntermediateDataEncryption.java:65)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleReducers

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleReducers(TestMRIntermediateDataEncryption.java:70)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testSingleReducer

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testSingleReducer(TestMRIntermediateDataEncryption.java:55)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:92)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster(TestMRTimelineEventHandling.java:51)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:146)


FAILED:  org.apache.hadoop.mapred.TestMerge.testMerge

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMerge.testMerge(TestMerge.java:82)


FAILED:  org.apache.hadoop.mapred.TestMiniMRBringup.testBringUp

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRBringup.testBringUp(TestMiniMRBringup.java:32)


FAILED:  org.apache.hadoop.mapred.TestMiniMRChildTask.org.apache.hadoop.mapred.TestMiniMRChildTask

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMiniMRChildTask.setup(TestMiniMRChildTask.java:365)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath(TestMiniMRClasspath.java:175)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable(TestMiniMRClasspath.java:207)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.setup(TestMiniMRClientCluster.java:80)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.cleanup(TestMiniMRClientCluster.java:93)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testMultipleSpills

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testGetJobStatus

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testGetJobStatus(TestNetworkedJob.java:81)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testJobQueueClient

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testJobQueueClient(TestNetworkedJob.java:318)


FAILED:  org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.org.apache.hadoop.mapred.TestReduceFetchFromPartialMem

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.setUp(TestReduceFetchFromPartialMem.java:61)


FAILED:  org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.org.apache.hadoop.mapred.TestReduceFetchFromPartialMem

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.setUp(TestReduceFetchFromPartialMem.java:61)


FAILED:  org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS(TestSpecialCharactersInOutputPath.java:112)


FAILED:  org.apache.hadoop.mapreduce.TestChild.testChild

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


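[Note: every failure in this report follows the same pattern: RMTimelineCollectorManager cannot be defined because org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager is not resolvable on the surefire test classpath, as the "Caused by: ClassNotFoundException" chain in the TestChild trace above shows. The snippet below is a minimal, purely illustrative sketch of how one might confirm that from the same JVM; the class name TimelineClasspathCheck and its output format are hypothetical and not part of the build.]

    public class TimelineClasspathCheck {
      public static void main(String[] args) {
        // Class names taken verbatim from the NoClassDefFoundError messages in this report.
        String[] names = {
            "org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager",
            "org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager"
        };
        for (String name : names) {
          try {
            // initialize=false: we only check whether the class can be resolved at all.
            Class.forName(name, false, TimelineClasspathCheck.class.getClassLoader());
            System.out.println("FOUND   " + name);
          } catch (ClassNotFoundException | NoClassDefFoundError e) {
            System.out.println("MISSING " + name + " (" + e + ")");
          }
        }
      }
    }

[If run with the same classpath as the failing tests, the collector class being reported MISSING while only the RM-side class fails to define would point at a missing server-side timeline service dependency on the hadoop-mapreduce-client-jobclient test classpath rather than at the tests themselves; this is an inference from the traces, not a confirmed diagnosis.]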
FAILED:  org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.TestLargeSort.setup(TestLargeSort.java:40)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobName

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobSubmissionSpecsAndFiles

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobClient

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput(TestMapReduceLazyOutput.java:137)


FAILED:  org.apache.hadoop.mapreduce.security.TestBinaryTokenFile.org.apache.hadoop.mapreduce.security.TestBinaryTokenFile

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.security.TestBinaryTokenFile.setUp(TestBinaryTokenFile.java:192)


FAILED:  org.apache.hadoop.mapreduce.security.TestMRCredentials.org.apache.hadoop.mapreduce.security.TestMRCredentials

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.TestMRCredentials.setUp(TestMRCredentials.java:66)


FAILED:  org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithClientCerts

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.startCluster(TestEncryptedShuffle.java:107)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithCerts(TestEncryptedShuffle.java:138)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithClientCerts(TestEncryptedShuffle.java:167)


FAILED:  org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithoutClientCerts

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.startCluster(TestEncryptedShuffle.java:107)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithCerts(TestEncryptedShuffle.java:138)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithoutClientCerts(TestEncryptedShuffle.java:172)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.testJobWithNonNormalizedCapabilities

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.setup(TestMRAMWithNonNormalizedCapabilities.java:71)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.setup(TestMRAppWithCombiner.java:79)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.org.apache.hadoop.mapreduce.v2.TestMRJobs

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.setup(TestMRJobs.java:162)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.setup(TestMRJobsWithHistoryService.java:91)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.setup(TestMRJobsWithProfiler.java:85)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMROldApiJobs.org.apache.hadoop.mapreduce.v2.TestMROldApiJobs

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMROldApiJobs.setup(TestMROldApiJobs.java:76)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMiniMRProxyUser.testValidProxyUser

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapreduce.v2.TestMiniMRProxyUser.setUp(TestMiniMRProxyUser.java:86)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.setUp(TestNonExistentJob.java:73)


FAILED:  org.apache.hadoop.mapreduce.v2.TestRMNMInfo.org.apache.hadoop.mapreduce.v2.TestRMNMInfo

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestRMNMInfo.setup(TestRMNMInfo.java:79)


FAILED:  org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.setup(TestSpeculativeExecution.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.org.apache.hadoop.mapreduce.v2.TestUberAM

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.setup(TestMRJobs.java:162)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.setup(TestUberAM.java:45)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:372)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:122)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:496)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:987)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:260)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:133)
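
[Editor's note, not part of the Jenkins output] The NoClassDefFoundError blocks above all trip while MiniYARNCluster initializes the ResourceManager, at the point where the hadoop-yarn-server-timelineservice classes are first needed but absent from the test classpath. A minimal, hypothetical pre-check along the following lines could turn that into an explicit skip; the helper class and method name are assumptions for illustration, not code from the Hadoop test suite.

import org.junit.Assume;

public class TimelineClasspathCheck {
  // Hypothetical helper: call from a @Before/@BeforeClass method so a missing
  // timelineservice jar skips the test with a clear message instead of failing
  // deep inside ResourceManager.createRMTimelineCollectorManager.
  static void assumeTimelineCollectorOnClasspath() {
    try {
      Class.forName(
          "org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager");
    } catch (ClassNotFoundException e) {
      Assume.assumeNoException(
          "hadoop-yarn-server-timelineservice is not on the test classpath", e);
    }
  }
}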



Hadoop-Mapreduce-trunk - Build # 2509 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2509/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33396 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.41 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.597 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.558 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.284 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.534 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.880 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:41 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.492 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.836 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:18 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:33 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:45 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:02 h
[INFO] Finished at: 2015-10-21T17:02:18+00:00
[INFO] Final Memory: 36M/690M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating MAPREDUCE-6489
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
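
[Editor's note, not part of the Jenkins output] The error message above reflects that ClientRMService only issues RM delegation tokens when Kerberos security is enabled, so a client running against a simple-auth MiniMRCluster has to skip the token request. A minimal sketch of that guard is shown below; the helper class and renewer name are illustrative assumptions, not the fix applied to TestNetworkedJob.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenGuard {
  // Only ask the RM for a delegation token when Kerberos is in use; on a
  // simple-auth cluster the call fails exactly as in the trace above.
  static Token<DelegationTokenIdentifier> maybeFetchToken(JobClient client)
      throws Exception {
    if (!UserGroupInformation.isSecurityEnabled()) {
      return null;
    }
    return client.getDelegationToken(new Text("test-renewer"));
  }
}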



Hadoop-Mapreduce-trunk - Build # 2508 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2508/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33393 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 190.502 sec - in org.apache.hadoop.mapreduce.TestMRJobClient
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.666 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.02 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.804 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.332 sec - in org.apache.hadoop.util.TestMRCJCRunJar

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.081 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:37 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.327 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.787 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:10 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:42 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:45 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:02 h
[INFO] Finished at: 2015-10-21T03:09:40+00:00
[INFO] Final Memory: 43M/1051M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-3985
Updating MAPREDUCE-6495
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2507 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2507/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33426 lines...]
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.047 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.668 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.993 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.963 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestFixedLengthInputFormat.testFormat:81->runRandomTests:328 » NoClassDefFound
  TestFixedLengthInputFormat.testPartialRecordUncompressedIn:190->runPartialRecordTest:410->readSplit:371 » NoClassDefFound

Tests run: 523, Failures: 0, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.897 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:34 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.407 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.907 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:11 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:34 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:57 h
[INFO] Finished at: 2015-10-21T00:11:16+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-3059
Updating HADOOP-12418
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapred.TestFixedLengthInputFormat.testFormat

Error Message:
org/apache/hadoop/fs/ContentSummary$Builder

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/ContentSummary$Builder
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.FileSystem.getContentSummary(FileSystem.java:1469)
	at org.apache.hadoop.fs.ChecksumFileSystem$FSDataBoundedInputStream.getFileLength(ChecksumFileSystem.java:279)
	at org.apache.hadoop.fs.ChecksumFileSystem$FSDataBoundedInputStream.seek(ChecksumFileSystem.java:322)
	at org.apache.hadoop.mapreduce.lib.input.FixedLengthRecordReader.initialize(FixedLengthRecordReader.java:108)
	at org.apache.hadoop.mapred.FixedLengthRecordReader.<init>(FixedLengthRecordReader.java:47)
	at org.apache.hadoop.mapred.FixedLengthInputFormat.getRecordReader(FixedLengthInputFormat.java:87)
	at org.apache.hadoop.mapred.TestFixedLengthInputFormat.runRandomTests(TestFixedLengthInputFormat.java:328)
	at org.apache.hadoop.mapred.TestFixedLengthInputFormat.testFormat(TestFixedLengthInputFormat.java:81)


FAILED:  org.apache.hadoop.mapred.TestFixedLengthInputFormat.testPartialRecordUncompressedIn

Error Message:
org/apache/hadoop/fs/ContentSummary$Builder

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/ContentSummary$Builder
	at org.apache.hadoop.fs.FileSystem.getContentSummary(FileSystem.java:1469)
	at org.apache.hadoop.fs.ChecksumFileSystem$FSDataBoundedInputStream.getFileLength(ChecksumFileSystem.java:279)
	at org.apache.hadoop.fs.ChecksumFileSystem$FSDataBoundedInputStream.seek(ChecksumFileSystem.java:322)
	at org.apache.hadoop.mapreduce.lib.input.FixedLengthRecordReader.initialize(FixedLengthRecordReader.java:108)
	at org.apache.hadoop.mapred.FixedLengthRecordReader.<init>(FixedLengthRecordReader.java:47)
	at org.apache.hadoop.mapred.FixedLengthInputFormat.getRecordReader(FixedLengthInputFormat.java:87)
	at org.apache.hadoop.mapred.TestFixedLengthInputFormat.readSplit(TestFixedLengthInputFormat.java:371)
	at org.apache.hadoop.mapred.TestFixedLengthInputFormat.runPartialRecordTest(TestFixedLengthInputFormat.java:410)
	at org.apache.hadoop.mapred.TestFixedLengthInputFormat.testPartialRecordUncompressedIn(TestFixedLengthInputFormat.java:190)
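
[Editor's note, not part of the Jenkins output] Both TestFixedLengthInputFormat traces above fail inside FileSystem.getContentSummary, which (as the stack trace shows) is the first point that needs the nested ContentSummary$Builder class, so a stale hadoop-common jar on the test classpath surfaces there as NoClassDefFoundError. The probe below, with a made-up local path, is an illustrative sketch that merely exercises the same call; it is not part of the test suite.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryProbe {
  public static void main(String[] args) throws Exception {
    // Use the local file system so the probe needs no cluster.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("target/content-summary-probe"); // illustrative path
    fs.mkdirs(dir);
    // This is the call that forces ContentSummary (and its Builder) to load.
    ContentSummary summary = fs.getContentSummary(dir);
    System.out.println("length=" + summary.getLength()
        + ", fileCount=" + summary.getFileCount()
        + ", dirCount=" + summary.getDirectoryCount());
  }
}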


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2506 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2506/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36155 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.526 sec - in org.apache.hadoop.mapred.TestReporter
Running org.apache.hadoop.mapred.TestClientRedirect
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.738 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 82.84 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 29.45 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.788 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:155 » 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.721 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:04 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.102 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.340 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:39 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:21 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:51 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:11 h
[INFO] Finished at: 2015-10-20T21:18:33+00:00
[INFO] Final Memory: 34M/762M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9270
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:155)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)
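
[Editor's note, not part of the Jenkins output] The message "test timed out after 150000 milliseconds" is JUnit 4's standard wording when a method annotated with a timeout exceeds it. A hypothetical example of that pattern, not the actual profiler test, is sketched below.

import org.junit.Test;

public class TimeoutExample {
  // JUnit fails the method with "test timed out after 150000 milliseconds"
  // once the annotated limit is exceeded.
  @Test(timeout = 150000)
  public void finishesWithinTwoAndAHalfMinutes() throws Exception {
    Thread.sleep(1000); // placeholder; the real test waits on a MapReduce job
  }
}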



Hadoop-Mapreduce-trunk - Build # 2505 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2505/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32854 lines...]
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 21.933 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.589 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.896 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.248 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.617 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned:177->testTaskAttemptAssignedKilledHistory:388 No Ta Started JH Event
  TestJobImpl.testUnusableNodeTransition:627->assertJobState:977 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 339, Failures: 2, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.054 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:07 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 39.384 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.826 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:45 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:45 min
[INFO] Finished at: 2015-10-20T18:22:07+00:00
[INFO] Final Memory: 44M/1213M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9251
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:977)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:627)


FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned

Error Message:
No Ta Started JH Event

Stack Trace:
java.lang.AssertionError: No Ta Started JH Event
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testTaskAttemptAssignedKilledHistory(TestTaskAttempt.java:388)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForTAFailedInAssigned(TestTaskAttempt.java:177)



Hadoop-Mapreduce-trunk - Build # 2504 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2504/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33394 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 233.251 sec - in org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter
Running org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.6 sec - in org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.694 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.253 sec - in org.apache.hadoop.util.TestMRCJCRunJar
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.413 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.780 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:37 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.007 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.226 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:24 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:39 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:56 h
[INFO] Finished at: 2015-10-20T06:23:49+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4270
Updating HDFS-9208
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2503 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2503/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36336 lines...]
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:146 » NoClassDefFound
  TestMiniMRClasspath.testClassPath:175 » NoClassDefFound org/apache/hadoop/yarn...
  TestMiniMRClasspath.testExternalWritable:207 » NoClassDefFound org/apache/hado...
  TestJobName>ClusterMapReduceTestCase.setUp:56->ClusterMapReduceTestCase.startCluster:86 » NoClassDefFound
  TestJobName>ClusterMapReduceTestCase.setUp:56->ClusterMapReduceTestCase.startCluster:86 » NoClassDefFound
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestMiniMRWithDFSWithDistinctUsers.setUp:97 » NoClassDefFound org/apache/hadoo...
  TestJobCounters.testHeapUsageCounter:665 » NoClassDefFound org/apache/hadoop/y...
  TestJobSysDirWithDFS.testWithDFS:130 » NoClassDefFound org/apache/hadoop/yarn/...
  TestLazyOutput.testLazyOutput:147 » NoClassDefFound org/apache/hadoop/yarn/ser...
  TestMiniMRBringup.testBringUp:32 » NoClassDefFound org/apache/hadoop/yarn/serv...
  TestMRIntermediateDataEncryption.testUberMode:60->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testMultipleMapsPerNode:65->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testMultipleReducers:70->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestMRIntermediateDataEncryption.testSingleReducer:55->doEncryptionTest:75->doEncryptionTest:90 » NoClassDefFound
  TestSpecialCharactersInOutputPath.testJobWithDFS:112 » NoClassDefFound org/apa...

Tests run: 499, Failures: 0, Errors: 71, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.908 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:35 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.749 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.034 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:18 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [21:33 min]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 38:43 min
[INFO] Finished at: 2015-10-20T03:37:55+00:00
[INFO] Final Memory: 45M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter7551266921745407539.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire6815530498811400717tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_1352793422593487035837tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12493
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
71 tests failed.
FAILED:  org.apache.hadoop.conf.TestNoDefaultsJobConf.testNoDefaults

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.fs.TestFileSystem.testCommandFormat

Error Message:
org/apache/hadoop/fs/shell/CommandFormat

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/shell/CommandFormat
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.TestFileSystem.testCommandFormat(TestFileSystem.java:96)


FAILED:  org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.initAndStartMiniMRYarnCluster(TestMRCJCSocketFactory.java:111)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory(TestMRCJCSocketFactory.java:85)


FAILED:  org.apache.hadoop.mapred.TestClusterMRNotification.testMR

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapred.NotificationTestCase.setUp(NotificationTestCase.java:145)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapred.NotificationTestCase.setUp(NotificationTestCase.java:145)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMapReduceRestarting

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMRConfig

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testDFSRestart

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestClusterMapReduceTestCase.testMapReduce

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobCleanup.org.apache.hadoop.mapred.TestJobCleanup

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:128)
	at org.apache.hadoop.mapred.TestJobCleanup.setUp(TestJobCleanup.java:73)


FAILED:  org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestJobCounters.testHeapUsageCounter(TestJobCounters.java:665)
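
Note on the failures above and below: every NoClassDefFoundError in this run dies at the same frame, ResourceManager.createRMTimelineCollectorManager, because the forked surefire JVM cannot see the timeline service collector classes. MiniYARNCluster.serviceInit then fails, which takes down every MiniMRCluster / MiniMRClientClusterFactory based test. A minimal classpath probe is sketched below; the class name TimelineClasspathProbe is invented for illustration and is not part of the test suite, but the two fully qualified class names are the ones quoted in the error messages.

    public class TimelineClasspathProbe {
        public static void main(String[] args) {
            String[] needed = {
                "org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager",
                "org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager"
            };
            for (String name : needed) {
                try {
                    // Succeeds only if the class is visible on this JVM's classpath.
                    Class.forName(name);
                    System.out.println("present: " + name);
                } catch (ClassNotFoundException e) {
                    // Mirrors the root cause chained into the NoClassDefFoundError traces in this report.
                    System.out.println("MISSING: " + name);
                }
            }
        }
    }

Running this probe with the same classpath surefire used would print MISSING for exactly the classes named in the error messages of these tests.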


FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexNameWithRegex

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobName.testComplexName

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapred.TestJobSysDirWithDFS.testWithDFS

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:128)
	at org.apache.hadoop.mapred.TestJobSysDirWithDFS.testWithDFS(TestJobSysDirWithDFS.java:130)


FAILED:  org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestLazyOutput.testLazyOutput(TestLazyOutput.java:147)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testUberMode

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testUberMode(TestMRIntermediateDataEncryption.java:60)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleMapsPerNode

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleMapsPerNode(TestMRIntermediateDataEncryption.java:65)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleReducers

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testMultipleReducers(TestMRIntermediateDataEncryption.java:70)


FAILED:  org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testSingleReducer

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:90)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.doEncryptionTest(TestMRIntermediateDataEncryption.java:75)
	at org.apache.hadoop.mapred.TestMRIntermediateDataEncryption.testSingleReducer(TestMRIntermediateDataEncryption.java:55)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:92)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testTimelineServiceStartInMiniCluster(TestMRTimelineEventHandling.java:51)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:146)


FAILED:  org.apache.hadoop.mapred.TestMerge.testMerge

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMerge.testMerge(TestMerge.java:82)


FAILED:  org.apache.hadoop.mapred.TestMiniMRBringup.testBringUp

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRBringup.testBringUp(TestMiniMRBringup.java:32)


FAILED:  org.apache.hadoop.mapred.TestMiniMRChildTask.org.apache.hadoop.mapred.TestMiniMRChildTask

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.TestMiniMRChildTask.setup(TestMiniMRChildTask.java:365)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testClassPath(TestMiniMRClasspath.java:175)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestMiniMRClasspath.testExternalWritable(TestMiniMRClasspath.java:207)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.setup(TestMiniMRClientCluster.java:80)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.cleanup(TestMiniMRClientCluster.java:93)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testMultipleSpills

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.testDistinctUsers

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers.setUp(TestMiniMRWithDFSWithDistinctUsers.java:97)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testGetJobStatus

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testGetJobStatus(TestNetworkedJob.java:81)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testJobQueueClient

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testJobQueueClient(TestNetworkedJob.java:318)


FAILED:  org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.org.apache.hadoop.mapred.TestReduceFetchFromPartialMem

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.setUp(TestReduceFetchFromPartialMem.java:61)


FAILED:  org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.org.apache.hadoop.mapred.TestReduceFetchFromPartialMem

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestReduceFetchFromPartialMem$1.setUp(TestReduceFetchFromPartialMem.java:61)


FAILED:  org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath.testJobWithDFS(TestSpecialCharactersInOutputPath.java:112)


FAILED:  org.apache.hadoop.mapreduce.TestChild.testChild

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.TestLargeSort.setup(TestLargeSort.java:40)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobName

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobSubmissionSpecsAndFiles

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMRJobClient.testJobClient

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase$ConfigurableMiniMRCluster.<init>(ClusterMapReduceTestCase.java:101)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.startCluster(ClusterMapReduceTestCase.java:86)
	at org.apache.hadoop.mapred.ClusterMapReduceTestCase.setUp(ClusterMapReduceTestCase.java:56)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapreduce.TestMapReduceLazyOutput.testLazyOutput(TestMapReduceLazyOutput.java:137)


FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testCustomCleanup

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.setUp(TestJobOutputCommitter.java:59)
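
The failure above, and most of the failures that follow, share a single symptom: while MiniYARNCluster brings up its ResourceManager, the JVM cannot load org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager, so the test dies in setUp with a NoClassDefFoundError. A NoClassDefFoundError (rather than a ClassNotFoundException) usually means the class was visible at compile time but is missing from, or failed to load on, the runtime classpath, here the surefire classpath of the failing MapReduce test modules. The snippet below is a purely illustrative diagnostic, not part of the build; only the class name is taken from the error message above. It reports which jar, if any, supplies a given class:

    // Illustrative diagnostic: print the jar that provides a class, or report
    // that the class cannot be resolved on the current classpath.
    public class FindClassSource {
        public static void main(String[] args) {
            String name = args.length > 0 ? args[0]
                : "org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorManager";
            try {
                Class<?> c = Class.forName(name);
                // getCodeSource() is null for classes shipped with the JDK itself.
                java.security.CodeSource src = c.getProtectionDomain().getCodeSource();
                System.out.println(name + " -> "
                    + (src == null ? "bootstrap/JDK classpath" : src.getLocation()));
            } catch (ClassNotFoundException | NoClassDefFoundError e) {
                System.out.println(name + " is not resolvable here: " + e);
            }
        }
    }

If the class does not resolve when run with the same classpath surefire uses, the module that owns it (the package name suggests the YARN timeline service server code) is most likely not on the test classpath of the failing MapReduce modules.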


FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testDefaultCleanupAndAbort

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.setUp(TestJobOutputCommitter.java:59)
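
Note that the traces alternate between two error messages within the same test class: the first failure in a JVM shows the URLClassLoader frames and names TimelineCollectorManager, while later failures name org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager and start directly at createRMTimelineCollectorManager. That pattern is consistent with how the JVM remembers a failed load: once a class fails to load or initialize, every subsequent reference to it from the same class loader is reported as a NoClassDefFoundError naming that class, without the loader being run again. A self-contained sketch of that behaviour follows; as an assumption for illustration only, a throwing static initializer stands in for the missing dependency and Broken stands in for RMTimelineCollectorManager:

    // First use of Broken fails during initialization; every later use in the
    // same JVM is then reported as NoClassDefFoundError for Broken itself.
    public class RememberedFailure {
        static class Broken {
            static {
                if (true) throw new RuntimeException("simulated missing dependency");
            }
        }
        public static void main(String[] args) {
            try {
                new Broken();
            } catch (Throwable t) {
                System.out.println("first use:  " + t);  // ExceptionInInitializerError
            }
            try {
                new Broken();
            } catch (Throwable t) {
                System.out.println("second use: " + t);  // NoClassDefFoundError: ...Broken
            }
        }
    }

So the RMTimelineCollectorManager messages are not a second, independent problem; they are the echo of the first missing-class failure within the same test JVM.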


FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testCustomAbort

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.setUp(TestJobOutputCommitter.java:59)


FAILED:  org.apache.hadoop.mapreduce.security.TestBinaryTokenFile.org.apache.hadoop.mapreduce.security.TestBinaryTokenFile

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.security.TestBinaryTokenFile.setUp(TestBinaryTokenFile.java:192)


FAILED:  org.apache.hadoop.mapreduce.security.TestMRCredentials.org.apache.hadoop.mapreduce.security.TestMRCredentials

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.TestMRCredentials.setUp(TestMRCredentials.java:66)


FAILED:  org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithClientCerts

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.startCluster(TestEncryptedShuffle.java:107)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithCerts(TestEncryptedShuffle.java:138)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithClientCerts(TestEncryptedShuffle.java:167)


FAILED:  org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithoutClientCerts

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.startCluster(TestEncryptedShuffle.java:107)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithCerts(TestEncryptedShuffle.java:138)
	at org.apache.hadoop.mapreduce.security.ssl.TestEncryptedShuffle.encryptedShuffleWithoutClientCerts(TestEncryptedShuffle.java:172)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.testJobWithNonNormalizedCapabilities

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities.setup(TestMRAMWithNonNormalizedCapabilities.java:71)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRAppWithCombiner.setup(TestMRAppWithCombiner.java:79)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobs.org.apache.hadoop.mapreduce.v2.TestMRJobs

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.setup(TestMRJobs.java:162)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.testJobHistoryData

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService.setup(TestMRJobsWithHistoryService.java:91)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.setup(TestMRJobsWithProfiler.java:85)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMROldApiJobs.org.apache.hadoop.mapreduce.v2.TestMROldApiJobs

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestMROldApiJobs.setup(TestMROldApiJobs.java:76)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMiniMRProxyUser.testValidProxyUser

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapreduce.v2.TestMiniMRProxyUser.setUp(TestMiniMRProxyUser.java:86)


FAILED:  org.apache.hadoop.mapreduce.v2.TestNonExistentJob.testGetInvalidJob

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapreduce.v2.TestNonExistentJob.setUp(TestNonExistentJob.java:73)


FAILED:  org.apache.hadoop.mapreduce.v2.TestRMNMInfo.org.apache.hadoop.mapreduce.v2.TestRMNMInfo

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestRMNMInfo.setup(TestRMNMInfo.java:79)


FAILED:  org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution

Error Message:
org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution.setup(TestSpeculativeExecution.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper

Error Message:
org/apache/hadoop/yarn/client/api/YarnClient

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/client/api/YarnClient
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.runFailingMapperJob(TestMRJobs.java:564)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testFailingMapper(TestUberAM.java:110)
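
The TestUberAM failures fit the same classpath problem but surface at job submission rather than at mini-cluster startup: YARNRunner's constructor appears to create a ResourceMgrDelegate, and loading that class pulls in org.apache.hadoop.yarn.client.api.YarnClient, which is the class the trace above reports as missing. The remaining TestUberAM cases in the same JVM then report ResourceMgrDelegate itself, following the remembered-failure pattern described earlier. A hypothetical helper for checking what surefire actually put on the classpath is sketched below; it only prints the entries, and the jar names to look for (such as hadoop-yarn-client) are an assumption based on the package names in the error messages:

    // Hypothetical helper: print every runtime classpath entry so the presence or
    // absence of the expected YARN client and timeline service jars can be checked.
    public class DumpClasspath {
        public static void main(String[] args) {
            String cp = System.getProperty("java.class.path");
            for (String entry : cp.split(java.io.File.pathSeparator)) {
                System.out.println(entry);
            }
        }
    }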


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJob

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobInternal(TestMRJobs.java:229)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJob(TestMRJobs.java:191)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJob(TestUberAM.java:57)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJobWithMultipleReducers

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobInternal(TestMRJobs.java:229)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJob(TestMRJobs.java:191)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJobWithMultipleReducers(TestUberAM.java:64)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testRandomWriter

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testRandomWriter(TestMRJobs.java:452)
	at org.apache.hadoop.mapreduce.v2.TestUberAM.testRandomWriter(TestUberAM.java:84)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testDistributedCache

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs._testDistributedCache(TestMRJobs.java:903)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testDistributedCache(TestMRJobs.java:916)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testSleepJobWithRemoteJar

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobInternal(TestMRJobs.java:229)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testSleepJobWithRemoteJar(TestMRJobs.java:196)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClient

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClient(TestMRJobs.java:263)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithClassloaderCustomClasses

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithClassloaderCustomClasses(TestMRJobs.java:253)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithOutClassloader

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithOutClassloader(TestMRJobs.java:258)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testContainerRollingLog

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260)
	at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testContainerRollingLog(TestMRJobs.java:652)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithJobClientLocal

Error Message:
org/apache/hadoop/yarn/util/Apps

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:332)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClientLocal(TestMRJobs.java:268)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.util.Apps
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:93)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:172)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:786)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:241)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:332)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithJobClientLocal(TestMRJobs.java:268)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
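
This trace is more informative than the others: the NoClassDefFoundError for org/apache/hadoop/yarn/util/Apps carries a Caused by: ClassNotFoundException, so the application classloader simply cannot find the class (presumably shipped in hadoop-yarn-common). A small, hypothetical diagnostic, separate from the test code itself, that lists the YARN-related entries the forked JVM actually sees:

    // Hypothetical diagnostic: print classpath entries containing "yarn" to check
    // whether the jar providing org.apache.hadoop.yarn.util.Apps reached the
    // forked surefire JVM at all.
    import java.io.File;

    public class ClasspathDump {
        public static void main(String[] args) {
            String classpath = System.getProperty("java.class.path");
            for (String entry : classpath.split(File.pathSeparator)) {
                if (entry.contains("yarn")) {
                    System.out.println(entry);
                }
            }
        }
    }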


FAILED:  org.apache.hadoop.mapreduce.v2.TestUberAM.testConfVerificationWithClassloader

Error Message:
org/apache/hadoop/mapred/ResourceMgrDelegate

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/mapred/ResourceMgrDelegate
	at org.apache.hadoop.mapred.YARNRunner.<init>(YARNRunner.java:122)
	at org.apache.hadoop.mapred.YarnClientProtocolProvider.create(YarnClientProtocolProvider.java:34)
	at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
	at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
	at org.apache.hadoop.mapred.JobClient.init(JobClient.java:475)
	at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:465)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerification(TestMRJobs.java:292)
	at org.apache.hadoop.mapreduce.v2.TestMRJobs.testConfVerificationWithClassloader(TestMRJobs.java:247)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createRMTimelineCollectorManager(ResourceManager.java:376)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.access$300(ResourceManager.java:125)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$RMActiveServices.serviceInit(ResourceManager.java:513)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.createAndInitActiveServices(ResourceManager.java:1004)
	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceInit(ResourceManager.java:263)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.initResourceManager(MiniYARNCluster.java:298)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$400(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceInit(MiniYARNCluster.java:450)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.serviceInit(MiniYARNCluster.java:272)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.serviceInit(MiniMRYarnCluster.java:186)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:79)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestNetworkedJob.createMiniClusterWithCapacityScheduler(TestNetworkedJob.java:401)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:133)



Hadoop-Mapreduce-trunk - Build # 2502 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2502/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33429 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.332 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.35 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.pipes.TestPipes
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.046 sec - in org.apache.hadoop.mapred.pipes.TestPipes
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.781 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.686 sec - in org.apache.hadoop.mapred.TestReporter

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.798 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:35 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.198 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.178 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:17 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:39 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:56 h
[INFO] Finished at: 2015-10-20T01:55:45+00:00
[INFO] Final Memory: 43M/1078M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9250
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
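
TestNetworkedJob fails for a different reason: it asks the MiniYARNCluster ResourceManager for an RM delegation token, and ClientRMService refuses because the cluster is running with simple (non-Kerberos) authentication. The usual client-side guard looks roughly like the sketch below; JobClient.getDelegationToken and UserGroupInformation.isSecurityEnabled are real APIs, while the surrounding class and the renewer name are illustrative.

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenGuard {
        // Only request an RM delegation token when Kerberos security is enabled;
        // on an insecure mini cluster the RM rejects the request with the
        // "Delegation Token can be issued only with kerberos authentication" error above.
        public static Token<?> maybeFetchToken(JobClient client) throws Exception {
            if (!UserGroupInformation.isSecurityEnabled()) {
                return null;
            }
            return client.getDelegationToken(new Text("renewer"));
        }
    }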



Hadoop-Mapreduce-trunk - Build # 2501 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2501/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32877 lines...]
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.578 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.71 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.981 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.411 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestMRApp.testUpdatedNodes:245 Expecting 2 completion events for success expected:<2> but was:<1>
  TestTaskAttempt.testMillisCountersUpdate:253->verifyMillisCounters:297 Job state is not correct (timedout) expected:<SUCCEEDED> but was:<RUNNING>

Tests in error: 
  TestTaskAttempt.testMRAppHistoryForMap:107 » YarnRuntime could not cleanup tes...

Tests run: 339, Failures: 2, Errors: 1, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.746 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:06 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 38.567 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.915 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [11:11 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 14:09 min
[INFO] Finished at: 2015-10-19T22:22:28+00:00
[INFO] Final Memory: 42M/1251M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes

Error Message:
Expecting 2 completion events for success expected:<2> but was:<1>

Stack Trace:
java.lang.AssertionError: Expecting 2 completion events for success expected:<2> but was:<1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.apache.hadoop.mapreduce.v2.app.TestMRApp.testUpdatedNodes(TestMRApp.java:245)


FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMillisCountersUpdate

Error Message:
Job state is not correct (timedout) expected:<SUCCEEDED> but was:<RUNNING>

Stack Trace:
java.lang.AssertionError: Job state is not correct (timedout) expected:<SUCCEEDED> but was:<RUNNING>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.waitForState(MRApp.java:411)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.verifyMillisCounters(TestTaskAttempt.java:297)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMillisCountersUpdate(TestTaskAttempt.java:253)
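
The testMillisCountersUpdate failure comes out of a wait-and-assert pattern: MRApp.waitForState apparently polls the job state and gives up after a bounded wait (hence the "(timedout)" marker in the message), so the job genuinely stayed RUNNING within the window on this build slave. A hedged sketch of that general poll-with-deadline shape, not the actual MRApp code:

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    public final class WaitFor {
        // Poll a condition until it holds or a deadline passes; the timeout then
        // surfaces in the assertion message, much like the "(timedout)" annotation above.
        public static void waitFor(Callable<Boolean> condition, long timeoutMs, long pollMs)
                throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!condition.call()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new TimeoutException("condition not met within " + timeoutMs + " ms");
                }
                Thread.sleep(pollMs);
            }
        }
    }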


FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForMap

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:150)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt$FailingAttemptsMRApp.<init>(TestTaskAttempt.java:394)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt.testMRAppHistoryForMap(TestTaskAttempt.java:107)



Hadoop-Mapreduce-trunk - Build # 2500 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2500/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33429 lines...]
Killed
Running org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.548 sec - in org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.671 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.251 sec - in org.apache.hadoop.util.TestMRCJCRunJar
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.374 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestMiniMRClientCluster.setup:80 » YarnRuntime org.apache.hadoop.yarn.exceptio...
  TestMiniMRClientCluster.cleanup:93 NullPointer
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 506, Failures: 0, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.929 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:37 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.079 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.821 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:22 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:36 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-19T21:15:57+00:00
[INFO] Final Memory: 34M/607M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter6197611322357438437.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire1949052212631076559tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2495365912892048238671tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12464
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.yarn.exceptions.YarnRuntimeException: Failed to intialize existing directories
	at org.apache.hadoop.fs.RawLocalFileSystem.listStatus(RawLocalFileSystem.java:460)
	at org.apache.hadoop.fs.DelegateToFileSystem.listStatus(DelegateToFileSystem.java:168)
	at org.apache.hadoop.fs.ChecksumFs.listStatus(ChecksumFs.java:521)
	at org.apache.hadoop.fs.AbstractFileSystem$1.<init>(AbstractFileSystem.java:890)
	at org.apache.hadoop.fs.AbstractFileSystem.listStatusIterator(AbstractFileSystem.java:888)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1487)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:41)
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.setup(TestMiniMRClientCluster.java:80)


FAILED:  org.apache.hadoop.mapred.TestMiniMRClientCluster.org.apache.hadoop.mapred.TestMiniMRClientCluster

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.mapred.TestMiniMRClientCluster.cleanup(TestMiniMRClientCluster.java:93)
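
The second TestMiniMRClientCluster entry is most likely a secondary failure: setup() threw before the mini cluster was assigned, so the static field is still null when cleanup() runs and dereferences it. A defensive teardown, sketched here with a placeholder field type since the test's actual field is not shown, keeps the report focused on the real setup error:

    import org.junit.AfterClass;

    public class GuardedTeardown {
        // Placeholder for the test's MiniMRClientCluster field; if setup() fails
        // before assigning it, an unguarded stop/close call produces the
        // NullPointerException reported above.
        private static AutoCloseable miniCluster;

        @AfterClass
        public static void cleanup() throws Exception {
            if (miniCluster != null) {
                miniCluster.close();
                miniCluster = null;
            }
        }
    }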


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:255)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2499 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2499/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33390 lines...]
Running org.apache.hadoop.mapred.TestJobSysDirWithDFS
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 92.304 sec - in org.apache.hadoop.mapred.TestJobSysDirWithDFS
Running org.apache.hadoop.mapred.TestReduceTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.578 sec - in org.apache.hadoop.mapred.TestReduceTask
Running org.apache.hadoop.mapred.TestBadRecords
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.049 sec - in org.apache.hadoop.mapred.TestBadRecords
Running org.apache.hadoop.mapred.TestMiniMRDFSCaching
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.064 sec - in org.apache.hadoop.mapred.TestMiniMRDFSCaching
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.666 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.289 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 519, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.203 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:06 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.685 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.230 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:53 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:54 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:13 h
[INFO] Finished at: 2015-10-19T18:16:31+00:00
[INFO] Final Memory: 37M/999M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4262
Updating YARN-4267
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2498 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2498/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33393 lines...]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.564 sec - in org.apache.hadoop.mapred.TestReduceTask
Running org.apache.hadoop.mapred.TestBadRecords
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.055 sec - in org.apache.hadoop.mapred.TestBadRecords
Running org.apache.hadoop.mapred.TestMiniMRDFSCaching
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.048 sec - in org.apache.hadoop.mapred.TestMiniMRDFSCaching
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.6 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.836 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.133 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:45 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.078 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.403 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:45 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:47 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:05 h
[INFO] Finished at: 2015-10-19T10:17:16+00:00
[INFO] Final Memory: 34M/719M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12460
Updating HDFS-9237
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2497 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2497/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36167 lines...]
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 103.177 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 29.544 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.452 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Failed tests: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:105 expected:<1> but was:<0>

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 1, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.276 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:10 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 38.320 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.700 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:43 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:22 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:06 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:26 h
[INFO] Finished at: 2015-10-19T05:34:53+00:00
[INFO] Final Memory: 40M/714M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12483
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: expected:<1> but was:<0>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.junit.Assert.assertEquals(Assert.java:542)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:105)
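
For reference, the "expected:<1> but was:<0>" text above is simply JUnit's
assertEquals message: the assertion at TestMRTimelineEventHandling.java:105
compares an expected count of 1 against an observed 0. A stand-alone
reproduction of that message format follows; the variable name is invented
and this is not the real test body.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertionMessageExample {

  @Test
  public void countMismatch() {
    int observedEntities = 0;          // e.g. nothing was published yet
    assertEquals(1, observedEntities); // fails: expected:<1> but was:<0>
  }
}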


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)
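
The "test timed out after 150000 milliseconds" exception above is raised by
JUnit's per-test timeout rather than by the job itself: the test declares a
150-second limit, and Job.waitForCompletion() was still sleeping in its
monitoring loop when the limit was reached. A sketch of the same shape is
below; the job setup (mini cluster, input and output paths) is omitted and
the class name is invented.

import org.apache.hadoop.mapreduce.Job;

import org.junit.Test;

public class TimeoutShapeExample {

  // JUnit interrupts the test thread and reports
  // "test timed out after 150000 milliseconds" if this runs longer.
  @Test(timeout = 150000)
  public void finishesWithinTheLimit() throws Exception {
    Job job = Job.getInstance(); // the real test configures a full cluster job
    // waitForCompletion polls and sleeps until the job finishes, which is
    // exactly where the timeout fired in the trace above.
    job.waitForCompletion(true);
  }
}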


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2291)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2285)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2496 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2496/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33394 lines...]
Running org.apache.hadoop.mapred.TestReporter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.203 sec - in org.apache.hadoop.mapred.TestReporter
Running org.apache.hadoop.mapred.TestClientRedirect
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.48 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 80.298 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.377 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.284 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.700 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:58 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 35.485 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.203 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:19 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:10 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:55 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:15 h
[INFO] Finished at: 2015-10-18T17:10:08+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-10941
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2495 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2495/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36226 lines...]
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.261 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.412 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:155 » 
  TestJobOutputCommitter.tearDown:67->HadoopTestCase.tearDown:170 » NoClassDefFound
  TestJobOutputCommitter.setUp:59->HadoopTestCase.setUp:157 » YarnRuntime could ...
  TestJobOutputCommitter.setUp:59->HadoopTestCase.setUp:157 » YarnRuntime could ...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 5, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.972 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:10 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 40.945 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.057 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:30 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:11 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:52 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:11 h
[INFO] Finished at: 2015-10-18T14:20:20+00:00
[INFO] Final Memory: 36M/1094M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-11628
Updating YARN-4155
Updating HADOOP-12450
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testCustomCleanup

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapred.MiniMRCluster.shutdown(MiniMRCluster.java:267)
	at org.apache.hadoop.mapred.HadoopTestCase.tearDown(HadoopTestCase.java:170)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.tearDown(TestJobOutputCommitter.java:67)
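
The NoClassDefFoundError above happens during MiniMRCluster teardown:
CompositeService.stop() walks its child services in reverse start order
through hadoop-common's service classes, and at that point the JVM could no
longer load org.apache.hadoop.service.ServiceOperations (usually a sign the
test classpath was already broken before tearDown ran). A rough sketch of
that reverse-stop pattern follows, with the surrounding class invented.

import java.util.List;

import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;

// Illustrative shutdown loop, not the actual CompositeService source.
public class ReverseStopExample {

  /** Stops child services in reverse start order, ignoring per-service failures. */
  public static void stopAll(List<Service> children) {
    for (int i = children.size() - 1; i >= 0; i--) {
      // stopQuietly logs and swallows exceptions thrown by stop(); loading
      // the ServiceOperations class itself is what failed in the trace above.
      ServiceOperations.stopQuietly(children.get(i));
    }
  }
}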


FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testDefaultCleanupAndAbort

Error Message:
could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.<init>(MiniYARNCluster.java:147)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:79)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:75)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:73)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.setUp(TestJobOutputCommitter.java:59)


FAILED:  org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.testCustomAbort

Error Message:
could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.<init>(MiniYARNCluster.java:147)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:79)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster.<init>(MiniMRYarnCluster.java:75)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:73)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter.setUp(TestJobOutputCommitter.java:59)
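
The two setUp failures above are configuration fallout rather than a real
filesystem problem: FileContext needs an AbstractFileSystem binding for the
"file" scheme, and the message shows fs.AbstractFileSystem.file.impl
resolving to null in the test's Configuration. In a stock Hadoop build that
key points at the LocalFs binding via core-default.xml; the sketch below
sets it explicitly, and the class name should be treated as an assumption
about this particular build.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;

public class LocalFsBindingExample {

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The binding the error message reports as missing; LocalFs is the
    // default AbstractFileSystem implementation for file:// URIs.
    conf.set("fs.AbstractFileSystem.file.impl",
        "org.apache.hadoop.fs.local.LocalFs");
    FileContext fc = FileContext.getLocalFSFileContext(conf);
    System.out.println(fc.getWorkingDirectory());
  }
}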


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:155)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1669)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2494 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2494/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33393 lines...]
Running org.apache.hadoop.mapreduce.lib.chain.TestMapReduceChain
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.376 sec - in org.apache.hadoop.mapreduce.lib.chain.TestMapReduceChain
Running org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.228 sec - in org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Running org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.948 sec - in org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.582 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.889 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.768 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:33 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.986 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.842 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:07 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:35 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:46 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-17T04:57:35+00:00
[INFO] Final Memory: 46M/981M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-2556
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2493 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2493/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36177 lines...]
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 29.303 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.402 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Failed tests: 
  TestMRTimelineEventHandling.testMRTimelineEventHandling:105 expected:<1> but was:<0>

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled:162 » IO Jo...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 1, Errors: 3, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.913 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:07 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 37.837 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.545 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:39 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:23 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  02:04 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:24 h
[INFO] Finished at: 2015-10-17T02:21:15+00:00
[INFO] Final Memory: 35M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4170
Updating YARN-4162
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling

Error Message:
expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: expected:<1> but was:<0>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.junit.Assert.assertEquals(Assert.java:542)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMRTimelineEventHandling(TestMRTimelineEventHandling.java:105)


FAILED:  org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled

Error Message:
Job didn't finish in 30 seconds

Stack Trace:
java.io.IOException: Job didn't finish in 30 seconds
	at org.apache.hadoop.mapred.UtilsForTests.runJobSucceed(UtilsForTests.java:622)
	at org.apache.hadoop.mapred.TestMRTimelineEventHandling.testMapreduceJobTimelineServiceEnabled(TestMRTimelineEventHandling.java:162)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)



Hadoop-Mapreduce-trunk - Build # 2492 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2492/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32830 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.854 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.895 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.28 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.138 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.071 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Failed tests: 
  TestJobHistoryEventHandler.testTimelineEventHandling:511 expected:<1> but was:<0>

Tests run: 339, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.668 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:20 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 38.196 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  8.847 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [11:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 15:02 min
[INFO] Finished at: 2015-10-16T19:26:13+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9253
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling

Error Message:
expected:<1> but was:<0>

Stack Trace:
java.lang.AssertionError: expected:<1> but was:<0>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.junit.Assert.assertEquals(Assert.java:542)
	at org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler.testTimelineEventHandling(TestJobHistoryEventHandler.java:511)



Hadoop-Mapreduce-trunk - Build # 2491 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2491/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33376 lines...]
Running org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.39 sec - in org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat
Running org.apache.hadoop.mapred.pipes.TestPipeApplication
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.28 sec - in org.apache.hadoop.mapred.pipes.TestPipeApplication
Running org.apache.hadoop.mapred.TestJavaSerialization
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.473 sec - in org.apache.hadoop.mapred.TestJavaSerialization
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.567 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.ipc.TestMRCJCSocketFactory
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 20.22 sec - in org.apache.hadoop.ipc.TestMRCJCSocketFactory

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 521, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.752 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:32 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 28.705 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.834 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:06 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:34 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:40 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:57 h
[INFO] Finished at: 2015-10-16T17:56:37+00:00
[INFO] Final Memory: 33M/751M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter8834521897017600972.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire6711647611461174406tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2605997871747532553149tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9205
Updating HDFS-9257
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2490 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2490/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33411 lines...]
Running org.apache.hadoop.mapreduce.lib.chain.TestSingleElementChain
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.315 sec - in org.apache.hadoop.mapreduce.lib.chain.TestSingleElementChain
Running org.apache.hadoop.mapreduce.lib.chain.TestMapReduceChain
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.653 sec - in org.apache.hadoop.mapreduce.lib.chain.TestMapReduceChain
Running org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.879 sec - in org.apache.hadoop.mapreduce.lib.chain.TestChainErrors
Running org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.689 sec - in org.apache.hadoop.mapreduce.lib.map.TestMultithreadedMapper
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.465 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.467 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestJobSysDirWithDFS.testWithDFS » Remote File /user/jenkins/wc/input/part-0 c...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 519, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.754 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:35 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.784 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.881 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:14 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:37 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:48 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:05 h
[INFO] Finished at: 2015-10-16T05:31:20+00:00
[INFO] Final Memory: 36M/1055M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4000
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestJobSysDirWithDFS.testWithDFS

Error Message:
File /user/jenkins/wc/input/part-0 could only be replicated to 0 nodes instead of minReplication (=1).  There are 4 datanode(s) running and 4 node(s) are excluded in this operation.
 at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1729)
 at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:298)
 at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2448)
 at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:730)
 at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
 at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)


Stack Trace:
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/wc/input/part-0 could only be replicated to 0 nodes instead of minReplication (=1).  There are 4 datanode(s) running and 4 node(s) are excluded in this operation.
	at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1729)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:298)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2448)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:730)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:500)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy21.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:404)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy22.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:911)
	at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1682)
	at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1492)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:592)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1043)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1016)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2489 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2489/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33398 lines...]
Running org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 64.756 sec - in org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService
Running org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 57.763 sec - in org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities
Running org.apache.hadoop.mapreduce.TestValueIterReset
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.34 sec - in org.apache.hadoop.mapreduce.TestValueIterReset
Running org.apache.hadoop.mapreduce.TestMapReduceLazyOutput
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 139.923 sec - in org.apache.hadoop.mapreduce.TestMapReduceLazyOutput

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.153 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:45 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 31.535 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.402 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:47 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:52 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:49 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:07 h
[INFO] Finished at: 2015-10-16T02:43:52+00:00
[INFO] Final Memory: 36M/1074M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12481
Updating HADOOP-12479
Updating HADOOP-12475
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2488 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2488/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 38826 lines...]
Running org.apache.hadoop.mapred.TestClientRedirect
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.795 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 100.833 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 33.956 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.613 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDefaultProfiler:111->testProfilerInternal:150 »  te...
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:150 » 
  TestMRJobsWithProfiler.tearDown:104 » NoClassDefFound org/apache/hadoop/servic...
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 520, Failures: 0, Errors: 4, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.780 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:04 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 36.517 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.154 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:36 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:18 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:52 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:12 h
[INFO] Finished at: 2015-10-15T21:36:25+00:00
[INFO] Final Memory: 34M/741M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefirebooter7704373766481781823.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire1619826421057703938tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire/surefire_2442119865493552570370tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12475
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1449)
	at org.apache.hadoop.ipc.Client.call(Client.java:1380)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDefaultProfiler

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDefaultProfiler(TestMRJobsWithProfiler.java:111)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1353)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1311)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:150)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler

Error Message:
org/apache/hadoop/service/ServiceOperations

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/service/ServiceOperations
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.tearDown(TestMRJobsWithProfiler.java:104)



Hadoop-Mapreduce-trunk - Build # 2487 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2487/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32851 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.793 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.182 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.954 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.421 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.675 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:627->assertJobState:977 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 339, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.661 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:58 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 35.361 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.060 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [10:45 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:30 min
[INFO] Finished at: 2015-10-15T17:21:03+00:00
[INFO] Final Memory: 41M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9220
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:977)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:627)
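The expected:<SUCCEEDED> but was:<ERROR> assertion in testUnusableNodeTransition also fails in build 2482 further down, which suggests a timing-sensitive state-machine check rather than a one-off. A hedged reproduction sketch (module taken from the console above; the Class#method form of -Dtest is standard Surefire usage):

  cd hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app
  mvn -Dtest=TestJobImpl#testUnusableNodeTransition test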



Hadoop-Mapreduce-trunk - Build # 2486 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2486/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32962 lines...]
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesTasks
Tests run: 20, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.28 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesTasks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobConf
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.722 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobConf
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.749 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.dao.TestJobInfo
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.54 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.dao.TestJobInfo
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.503 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.589 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Running org.apache.hadoop.mapreduce.v2.hs.TestJobListCache
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.793 sec - in org.apache.hadoop.mapreduce.v2.hs.TestJobListCache

Results :

Tests run: 195, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.938 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:51 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.280 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.293 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:46 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [05:25 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 17:45 min
[INFO] Finished at: 2015-10-15T16:21:20+00:00
[INFO] Final Memory: 47M/1180M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefirebooter3013196664178914024.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefire6859828389135744078tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefire_1143212547753421410637tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12436
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed
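Note that this report says "All tests passed" even though the HistoryServer module failed: when the forked Surefire JVM dies ("The forked VM terminated without properly saying goodbye" above), the suites it never finished do not show up as failures. A hedged sketch of where crash evidence usually lands (file names are JVM defaults, not taken from this log; -XX:+HeapDumpOnOutOfMemoryError appears in the Surefire command above):

  cd hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs
  ls target/surefire-reports/
  ls hs_err_pid*.log java_pid*.hprof 2>/dev/null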

Hadoop-Mapreduce-trunk - Build # 2485 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2485/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 36155 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.364 sec - in org.apache.hadoop.mapred.TestReporter
Running org.apache.hadoop.mapred.TestClientRedirect
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.52 sec - in org.apache.hadoop.mapred.TestClientRedirect
Running org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 83.252 sec - in org.apache.hadoop.mapred.TestReduceFetchFromPartialMem
Running org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 29.353 sec - in org.apache.hadoop.mapred.TestSpecialCharactersInOutputPath
Running org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.645 sec - in org.apache.hadoop.mapred.TestSequenceFileAsBinaryOutputFormat

Results :

Tests in error: 
  TestMRJobsWithProfiler.testDifferentProfilers:117->testProfilerInternal:155 » 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 2, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.446 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:58 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 35.355 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.993 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:17 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [06:10 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:55 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:14 h
[INFO] Finished at: 2015-10-15T15:20:06+00:00
[INFO] Final Memory: 34M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9157
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1448)
	at org.apache.hadoop.ipc.Client.call(Client.java:1379)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers

Error Message:
test timed out after 150000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 150000 milliseconds
	at java.lang.Thread.sleep(Native Method)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testProfilerInternal(TestMRJobsWithProfiler.java:155)
	at org.apache.hadoop.mapreduce.v2.TestMRJobsWithProfiler.testDifferentProfilers(TestMRJobsWithProfiler.java:117)



Hadoop-Mapreduce-trunk - Build # 2484 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2484/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32938 lines...]
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAcls
Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.027 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAcls
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.704 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts
Killed
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.684 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.953 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.05 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Running org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.017 sec - in org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

Results :

Tests run: 183, Failures: 0, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.618 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:02 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 37.782 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.468 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [11:16 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [06:28 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 20:37 min
[INFO] Finished at: 2015-10-15T12:25:25+00:00
[INFO] Final Memory: 57M/1227M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefirebooter2157191259567933694.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefire2505084597527677959tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire/surefire_1285141373464626407786tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9205
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed

Hadoop-Mapreduce-trunk - Build # 2483 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2483/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33398 lines...]
Running org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 228.998 sec - in org.apache.hadoop.mapreduce.lib.output.TestJobOutputCommitter
Running org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.555 sec - in org.apache.hadoop.mapreduce.lib.output.TestMRCJCFileOutputCommitter
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.648 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.253 sec - in org.apache.hadoop.util.TestMRCJCRunJar
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.387 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.889 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:37 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.907 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.868 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:18 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:37 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:39 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:56 h
[INFO] Finished at: 2015-10-15T11:29:34+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4258
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1448)
	at org.apache.hadoop.ipc.Client.call(Client.java:1379)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)



Hadoop-Mapreduce-trunk - Build # 2482 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2482/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32852 lines...]
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Tests run: 13, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.417 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.945 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Tests run: 21, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.262 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs
Running org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.4 sec - in org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt
Running org.apache.hadoop.mapreduce.TestMapreduceConfigFields
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.92 sec - in org.apache.hadoop.mapreduce.TestMapreduceConfigFields

Results :

Failed tests: 
  TestJobImpl.testUnusableNodeTransition:627->assertJobState:977 expected:<SUCCEEDED> but was:<ERROR>

Tests run: 339, Failures: 1, Errors: 0, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.600 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:47 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.950 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.640 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:48 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:21 min
[INFO] Finished at: 2015-10-15T05:53:15+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9188
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition

Error Message:
expected:<SUCCEEDED> but was:<ERROR>

Stack Trace:
java.lang.AssertionError: expected:<SUCCEEDED> but was:<ERROR>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.assertJobState(TestJobImpl.java:977)
	at org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.testUnusableNodeTransition(TestJobImpl.java:627)



Hadoop-Mapreduce-trunk - Build # 2481 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2481/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33404 lines...]
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 187.016 sec - in org.apache.hadoop.mapreduce.TestMRJobClient
Running org.apache.hadoop.mapreduce.TestMapCollection
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.972 sec - in org.apache.hadoop.mapreduce.TestMapCollection
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.189 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf
Running org.apache.hadoop.util.TestMRCJCReflectionUtils
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.647 sec - in org.apache.hadoop.util.TestMRCJCReflectionUtils
Running org.apache.hadoop.util.TestMRCJCRunJar
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.257 sec - in org.apache.hadoop.util.TestMRCJCRunJar

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.864 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:38 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.828 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.974 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:15 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:40 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:41 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:58 h
[INFO] Finished at: 2015-10-15T03:46:25+00:00
[INFO] Final Memory: 34M/708M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9210
Updating HDFS-9223
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1448)
	at org.apache.hadoop.ipc.Client.call(Client.java:1379)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
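The ClientRMService message above says delegation tokens are only issued under kerberos authentication, so this looks like the test's ResourceManager running without kerberos rather than a fault in the JobClient path under test. A hedged isolation sketch (module taken from the console above):

  cd hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient
  mvn -Dtest=TestNetworkedJob#testNetworkedJob test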



Hadoop-Mapreduce-trunk - Build # 2480 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2480/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33655 lines...]

Results :

Failed tests: 
  TestLargeSort.testLargeSort:62 Large sort failed for 256 expected:<0> but was:<1>

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...
  TestMRCJCSocketFactory.testSocketFactory:85->initAndStartMiniMRYarnCluster:112 » YarnRuntime
  TestFileSystem.testFsCache:513->runTestCache:553 » NoClassDefFound org/apache/...
  TestFileSystem.testFs:70->testFs:83->createControlFile:124 NoClassDefFound org...
  TestFileSystem.testCommandFormat:96 NoClassDefFound org/apache/hadoop/fs/shell...
  TestNoDefaultsJobConf>HadoopTestCase.setUp:157 » YarnRuntime java.lang.NoClass...
  TestLargeSort.cleanup:47 » YarnRuntime java.lang.reflect.InvocationTargetExcep...

Tests run: 524, Failures: 1, Errors: 7, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.822 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:34 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.372 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.804 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:08 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:35 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:42 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:59 h
[INFO] Finished at: 2015-10-14T23:56:37+00:00
[INFO] Final Memory: 34M/750M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9238
Updating HDFS-9210
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.conf.TestNoDefaultsJobConf.testNoDefaults

Error Message:
java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos$GetListingRequestProto$Builder

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos$GetListingRequestProto$Builder
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:226)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.NoClassDefFoundError: org/apache/hadoop/hdfs/protocol/proto/ClientNamenodeProtocolProtos$GetListingRequestProto$Builder
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetListingRequestProto.newBuilder(ClientNamenodeProtocolProtos.java:29094)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getListing(ClientNamenodeProtocolTranslatorPB.java:542)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy19.getListing(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1569)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:211)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:198)
	at org.apache.hadoop.fs.Hdfs$2.<init>(Hdfs.java:180)
	at org.apache.hadoop.fs.Hdfs.listStatusIterator(Hdfs.java:180)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1494)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetListingRequestProto$Builder
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$GetListingRequestProto.newBuilder(ClientNamenodeProtocolProtos.java:29094)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getListing(ClientNamenodeProtocolTranslatorPB.java:542)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy19.getListing(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1569)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:211)
	at org.apache.hadoop.fs.Hdfs$DirListingIterator.<init>(Hdfs.java:198)
	at org.apache.hadoop.fs.Hdfs$2.<init>(Hdfs.java:180)
	at org.apache.hadoop.fs.Hdfs.listStatusIterator(Hdfs.java:180)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1492)
	at org.apache.hadoop.fs.FileContext$22.next(FileContext.java:1487)
	at org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
	at org.apache.hadoop.fs.FileContext.listStatus(FileContext.java:1494)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:456)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:444)
	at org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils.localGlobber(JobHistoryUtils.java:439)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.findTimestampedDirectories(HistoryFileManager.java:778)
	at org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.initExisting(HistoryFileManager.java:672)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistory.serviceInit(JobHistory.java:97)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer.serviceInit(JobHistoryServer.java:151)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster$JobHistoryServerWrapper.serviceStart(MiniMRYarnCluster.java:210)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.mapred.MiniMRClientClusterFactory.create(MiniMRClientClusterFactory.java:80)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:187)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:175)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:167)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:159)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:152)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:145)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:138)
	at org.apache.hadoop.mapred.MiniMRCluster.<init>(MiniMRCluster.java:133)
	at org.apache.hadoop.mapred.HadoopTestCase.setUp(HadoopTestCase.java:157)
	at junit.framework.TestCase.runBare(TestCase.java:139)
	at junit.framework.TestResult$1.protect(TestResult.java:122)
	at junit.framework.TestResult.runProtected(TestResult.java:142)
	at junit.framework.TestResult.run(TestResult.java:125)
	at junit.framework.TestCase.run(TestCase.java:129)
	at junit.framework.TestSuite.runTest(TestSuite.java:255)
	at junit.framework.TestSuite.run(TestSuite.java:250)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:84)
	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:264)
	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:124)
	at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:200)
	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:153)
	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)


FAILED:  org.apache.hadoop.fs.TestFileSystem.testFsCache

Error Message:
org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/util/IntrusiveCollection$IntrusiveIterator
	at org.apache.hadoop.util.IntrusiveCollection.iterator(IntrusiveCollection.java:213)
	at org.apache.hadoop.util.IntrusiveCollection.clear(IntrusiveCollection.java:368)
	at org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.clearPendingCachingCommands(DatanodeManager.java:1570)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.stopActiveServices(FSNamesystem.java:1218)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.close(FSNamesystem.java:1578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stopCommonServices(NameNode.java:723)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.stop(NameNode.java:893)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1892)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1861)
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:1854)
	at org.apache.hadoop.fs.TestFileSystem.runTestCache(TestFileSystem.java:553)
	at org.apache.hadoop.fs.TestFileSystem.testFsCache(TestFileSystem.java:513)


FAILED:  org.apache.hadoop.fs.TestFileSystem.testFs

Error Message:
org/apache/hadoop/io/LongWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/LongWritable
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.TestFileSystem.createControlFile(TestFileSystem.java:124)
	at org.apache.hadoop.fs.TestFileSystem.testFs(TestFileSystem.java:83)
	at org.apache.hadoop.fs.TestFileSystem.testFs(TestFileSystem.java:70)


FAILED:  org.apache.hadoop.fs.TestFileSystem.testCommandFormat

Error Message:
org/apache/hadoop/fs/shell/CommandFormat

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/shell/CommandFormat
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.fs.TestFileSystem.testCommandFormat(TestFileSystem.java:96)


FAILED:  org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory

Error Message:
java.io.IOException: ResourceManager failed to start. Final state is STOPPED

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.io.IOException: ResourceManager failed to start. Final state is STOPPED
	at org.apache.hadoop.yarn.server.MiniYARNCluster.startResourceManager(MiniYARNCluster.java:330)
	at org.apache.hadoop.yarn.server.MiniYARNCluster.access$500(MiniYARNCluster.java:99)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$ResourceManagerWrapper.serviceStart(MiniYARNCluster.java:456)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:120)
	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.initAndStartMiniMRYarnCluster(TestMRCJCSocketFactory.java:112)
	at org.apache.hadoop.ipc.TestMRCJCSocketFactory.testSocketFactory(TestMRCJCSocketFactory.java:85)


FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1448)
	at org.apache.hadoop.ipc.Client.call(Client.java:1379)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)


FAILED:  org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort

Error Message:
Large sort failed for 256 expected:<0> but was:<1>

Stack Trace:
java.lang.AssertionError: Large sort failed for 256 expected:<0> but was:<1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:555)
	at org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort(TestLargeSort.java:62)


FAILED:  org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort

Error Message:
java.lang.reflect.InvocationTargetException

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: java.lang.reflect.InvocationTargetException
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos$UnRegisterNodeManagerRequestProto.<clinit>(YarnServerCommonServiceProtos.java:4118)
	at org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl.<init>(UnRegisterNodeManagerRequestPBImpl.java:33)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl.newRecordInstance(RecordFactoryPBImpl.java:70)
	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.unRegisterNM(NodeStatusUpdaterImpl.java:263)
	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.serviceStop(NodeStatusUpdaterImpl.java:245)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceStop(NodeManager.java:373)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.yarn.server.MiniYARNCluster$NodeManagerWrapper.serviceStop(MiniYARNCluster.java:590)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
	at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
	at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
	at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
	at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
	at org.apache.hadoop.mapred.MiniMRYarnClusterAdapter.stop(MiniMRYarnClusterAdapter.java:55)
	at org.apache.hadoop.mapreduce.TestLargeSort.cleanup(TestLargeSort.java:47)



Hadoop-Mapreduce-trunk - Build # 2479 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2479/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33037 lines...]
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.281 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.022 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks
Running org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.268 sec - in org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp
Running org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.17 sec - in org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

Results :

Tests in error: 
  TestJobHistoryEvents.testEventsFlushOnStop:114 » NoClassDefFound org/apache/ha...
  TestJobHistoryEvents.testJobHistoryEventHandlerIsFirstServiceToStop:147 NoClassDefFound
  TestJobHistoryEvents.testHistoryEvents:54 NoClassDefFound org.apache.hadoop.ma...
  TestJobHistoryEvents.testAssignedQueue:162 NoClassDefFound org.apache.hadoop.m...

Tests run: 198, Failures: 0, Errors: 4, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.932 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [02:09 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 37.892 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  6.538 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [10:40 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. FAILURE [05:40 min]
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 19:21 min
[INFO] Finished at: 2015-10-14T20:53:31+00:00
[INFO] Final Memory: 47M/1225M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-hs: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-hs
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12478
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testEventsFlushOnStop

Error Message:
org/apache/hadoop/yarn/factories/RecordFactory

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/factories/RecordFactory
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.ClassLoader.defineClass1(Native Method)
	at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
	at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
	at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at java.lang.Class.forName0(Native Method)
	at java.lang.Class.forName(Class.java:190)
	at org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider.getFactoryClassInstance(RecordFactoryProvider.java:57)
	at org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider.getRecordFactory(RecordFactoryProvider.java:52)
	at org.apache.hadoop.yarn.util.Records.<clinit>(Records.java:32)
	at org.apache.hadoop.yarn.api.records.ApplicationId.newInstance(ApplicationId.java:49)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<clinit>(MRApp.java:134)
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testEventsFlushOnStop(TestJobHistoryEvents.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testJobHistoryEventHandlerIsFirstServiceToStop

Error Message:
Could not initialize class org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithSpecialHistoryHandler

Stack Trace:
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents$MRAppWithSpecialHistoryHandler
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testJobHistoryEventHandlerIsFirstServiceToStop(TestJobHistoryEvents.java:147)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testHistoryEvents

Error Message:
org.apache.hadoop.mapreduce.v2.app.MRApp

Stack Trace:
java.lang.NoClassDefFoundError: org.apache.hadoop.mapreduce.v2.app.MRApp
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testHistoryEvents(TestJobHistoryEvents.java:54)


FAILED:  org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testAssignedQueue

Error Message:
org.apache.hadoop.mapreduce.v2.app.MRApp

Stack Trace:
java.lang.NoClassDefFoundError: org.apache.hadoop.mapreduce.v2.app.MRApp
	at org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.testAssignedQueue(TestJobHistoryEvents.java:162)



Hadoop-Mapreduce-trunk - Build # 2478 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2478/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32974 lines...]
Running org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.364 sec - in org.apache.hadoop.mapred.TestTaskAttemptListenerImpl
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.925 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestRecovery.testOutputRecovery:862->writeOutput:1877 NoClassDefFound org/apac...
  TestRecovery.testOutputRecoveryMapsOnly:930 » YarnRuntime could not cleanup te...
  TestRecovery.testRecoverySuccessUsingCustomOutputCommitter:459 » YarnRuntime c...
  TestRecovery.testCrashed:130 » YarnRuntime could not cleanup test dir
  TestRecovery.testSpeculative:1164 » YarnRuntime could not cleanup test dir
  TestRecovery.testRecoveryWithoutShuffleSecret:1316 » YarnRuntime could not cle...
  TestRecovery.testRecoveryWithOldCommiter:1045 » YarnRuntime could not cleanup ...

Tests run: 339, Failures: 0, Errors: 7, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.002 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:36 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 30.176 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.946 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:46 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:01 min
[INFO] Finished at: 2015-10-14T19:20:14+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12364
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecovery

Error Message:
org/apache/hadoop/io/NullWritable

Stack Trace:
java.lang.NoClassDefFoundError: org/apache/hadoop/io/NullWritable
	at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
	at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
	at java.security.AccessController.doPrivileged(Native Method)
	at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
	at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.writeOutput(TestRecovery.java:1877)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecovery(TestRecovery.java:862)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecoveryMapsOnly

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecoveryMapsOnly(TestRecovery.java:930)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoverySuccessUsingCustomOutputCommitter

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoverySuccessUsingCustomOutputCommitter(TestRecovery.java:459)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashed

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashed(TestRecovery.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testSpeculative

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testSpeculative(TestRecovery.java:1164)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithoutShuffleSecret

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppNoShuffleSecret.<init>(TestRecovery.java:1960)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithoutShuffleSecret(TestRecovery.java:1316)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithOldCommiter

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithOldCommiter(TestRecovery.java:1045)



Hadoop-Mapreduce-trunk - Build # 2477 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2477/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33367 lines...]
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempts.<init>:114->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesJobConf.<init>:151->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer
  TestAMWebServicesAttempt.<init>:130->JerseyTest.<init>:217->JerseyTest.getContainer:342 » TestContainer

Tests run: 339, Failures: 0, Errors: 23, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.748 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:36 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.189 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.765 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:55 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:10 min
[INFO] Finished at: 2015-10-14T17:10:52+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-12436
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
23 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testGetTaskAttemptIdState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.testPutTaskAttemptIdXMLState

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempt.<init>(TestAMWebServicesAttempt.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid2

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid3

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdInvalid

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptId

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdNonExist

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttempts

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdXMLCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdCounters

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptsSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdBogus

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.testTaskAttemptIdSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts.<init>(TestAMWebServicesAttempts.java:114)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConf

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfXML

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfSlash

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.testJobConfDefault

Error Message:
java.net.BindException: Address already in use

Stack Trace:
com.sun.jersey.test.framework.spi.container.TestContainerException: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind0(Native Method)
	at sun.nio.ch.Net.bind(Net.java:444)
	at sun.nio.ch.Net.bind(Net.java:436)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:214)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:413)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:384)
	at org.glassfish.grizzly.nio.transport.TCPNIOTransport.bind(TCPNIOTransport.java:375)
	at org.glassfish.grizzly.http.server.NetworkListener.start(NetworkListener.java:549)
	at org.glassfish.grizzly.http.server.HttpServer.start(HttpServer.java:255)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:326)
	at com.sun.jersey.api.container.grizzly2.GrizzlyServerFactory.createHttpServer(GrizzlyServerFactory.java:343)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.instantiateGrizzlyWebServer(GrizzlyWebTestContainerFactory.java:219)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:129)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory$GrizzlyWebTestContainer.<init>(GrizzlyWebTestContainerFactory.java:86)
	at com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory.create(GrizzlyWebTestContainerFactory.java:79)
	at com.sun.jersey.test.framework.JerseyTest.getContainer(JerseyTest.java:342)
	at com.sun.jersey.test.framework.JerseyTest.<init>(JerseyTest.java:217)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf.<init>(TestAMWebServicesJobConf.java:151)
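
Every TestAMWebServicesAttempts and TestAMWebServicesJobConf failure above shows the same root symptom: the Grizzly container that JerseyTest starts cannot bind its HTTP port because some other process on the Jenkins slave (or a leaked container from an earlier test) still holds it. A minimal sketch of how a test could ask the kernel for a free ephemeral port instead of relying on a fixed one, assuming the test has some way to hand the chosen port to JerseyTest (the FreePortFinder class is a hypothetical illustration, not part of these tests):

    import java.io.IOException;
    import java.net.ServerSocket;

    // Minimal sketch: ask the kernel for an unused ephemeral port, then hand it
    // to the test container, rather than binding a fixed default port that a
    // previous run (or another job on the same slave) may still be holding.
    public final class FreePortFinder {
        private FreePortFinder() {}

        public static int findFreePort() {
            // Port 0 means "any free port"; closing the probe socket releases it
            // immediately so the test container can bind the same number.
            try (ServerSocket probe = new ServerSocket(0)) {
                probe.setReuseAddress(true);
                return probe.getLocalPort();
            } catch (IOException e) {
                throw new IllegalStateException("no free port available", e);
            }
        }
    }

There is still a small window in which another process could grab the port between the probe closing and the container binding, but it avoids the fixed-port collisions seen in the failures above.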



Hadoop-Mapreduce-trunk - Build # 2476 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2476/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33398 lines...]
Running org.apache.hadoop.mapred.TestReduceTask
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.675 sec - in org.apache.hadoop.mapred.TestReduceTask
Running org.apache.hadoop.mapred.TestBadRecords
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.057 sec - in org.apache.hadoop.mapred.TestBadRecords
Running org.apache.hadoop.mapred.TestMiniMRDFSCaching
Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.054 sec - in org.apache.hadoop.mapred.TestMiniMRDFSCaching
Running org.apache.hadoop.io.TestSequenceFileMergeProgress
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.882 sec - in org.apache.hadoop.io.TestSequenceFileMergeProgress
Running org.apache.hadoop.conf.TestNoDefaultsJobConf
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 23.84 sec - in org.apache.hadoop.conf.TestNoDefaultsJobConf

Results :

Tests in error: 
  TestNetworkedJob.testNetworkedJob:260 » IO org.apache.hadoop.yarn.exceptions.Y...

Tests run: 523, Failures: 0, Errors: 1, Skipped: 11

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.167 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:47 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 32.393 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.649 s]
[INFO] Apache Hadoop MapReduce App ....................... SUCCESS [09:52 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SUCCESS [05:51 min]
[INFO] Apache Hadoop MapReduce JobClient ................. FAILURE [  01:44 h]
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:03 h
[INFO] Finished at: 2015-10-14T14:04:48+00:00
[INFO] Final Memory: 35M/745M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-jobclient: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-jobclient
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4250
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob

Error Message:
org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
 at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
 at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
 at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
 at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
 at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:415)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
 at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
 at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
 ... 10 more


Stack Trace:
java.io.IOException: org.apache.hadoop.yarn.exceptions.YarnException: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:38)
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1044)
	at org.apache.hadoop.yarn.api.impl.pb.service.ApplicationClientProtocolPBServiceImpl.getDelegationToken(ApplicationClientProtocolPBServiceImpl.java:325)
	at org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService$2.callBlockingMethod(ApplicationClientProtocol.java:483)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:637)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:976)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2276)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2272)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2270)
Caused by: java.io.IOException: Delegation Token can be issued only with kerberos authentication
	at org.apache.hadoop.yarn.server.resourcemanager.ClientRMService.getDelegationToken(ClientRMService.java:1017)
	... 10 more

	at org.apache.hadoop.ipc.Client.call(Client.java:1448)
	at org.apache.hadoop.ipc.Client.call(Client.java:1379)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
	at com.sun.proxy.$Proxy84.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:339)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:251)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
	at com.sun.proxy.$Proxy85.getDelegationToken(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:541)
	at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:177)
	at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:231)
	at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1234)
	at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1231)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1230)
	at org.apache.hadoop.mapred.TestNetworkedJob.testNetworkedJob(TestNetworkedJob.java:260)
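
Unlike the bind failures in the earlier report, this failure comes from the ResourceManager rejecting the delegation-token request because the test cluster runs with simple authentication rather than Kerberos. A minimal sketch of a pre-flight check a client could make before asking for a token, using standard Hadoop security APIs (the TokenGuard class itself is hypothetical, not the change applied to this test):

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

    // Minimal sketch: only ask the RM for a delegation token when the current
    // user is actually Kerberos-authenticated; with simple auth the RM throws
    // "Delegation Token can be issued only with kerberos authentication".
    public final class TokenGuard {
        private TokenGuard() {}

        public static boolean canRequestDelegationToken() throws IOException {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            return UserGroupInformation.isSecurityEnabled()
                && ugi.getAuthenticationMethod() == AuthenticationMethod.KERBEROS;
        }
    }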



Hadoop-Mapreduce-trunk - Build # 2475 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2475/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32988 lines...]

Results :

Tests in error: 
  TestRMContainerAllocator.testReportedAppProgress:970 » NullPointer
  TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode:1605 » NullPointer
  TestRMContainerAllocator.testBlackListedNodes:1318 » NullPointer
  TestRMContainerAllocator.testReducerRampdownDiagnostics:450 » NullPointer
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer

Tests run: 339, Failures: 0, Errors: 9, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.958 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:45 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.367 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.482 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:11 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:40 min
[INFO] Finished at: 2015-10-14T11:24:28+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating YARN-4255
Updating YARN-4253
Updating YARN-4252
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)
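
This NullPointerException, repeated verbatim in the eight failures that follow, is thrown inside AppSchedulingInfo.isRequestLabelChanged, which presumably compares node-label expressions where one side can legitimately be null (the build's "Updating" lines reference the node-label issues YARN-4252, YARN-4253 and YARN-4255). A minimal sketch of the null-safe comparison pattern that avoids this class of NPE (LabelCompare and labelsDiffer are hypothetical names, not the actual YARN patch):

    import java.util.Objects;

    // Minimal sketch, not the actual YARN change: comparing node-label
    // expressions with Objects.equals never dereferences a null label, whereas
    // a plain requested.equals(existing) throws when 'requested' is null.
    public final class LabelCompare {
        private LabelCompare() {}

        public static boolean labelsDiffer(String requestedLabel, String existingLabel) {
            return !Objects.equals(requestedLabel, existingLabel);
        }
    }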


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)
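
The TestRMContainerAllocator errors in these reports all share the same failing frame, AppSchedulingInfo.isRequestLabelChanged, a method whose name suggests it compares the node-label expression of an incoming ResourceRequest against the one already stored; a null label expression reaching an unguarded comparison at that point would produce exactly this NullPointerException. The following is a hypothetical, standalone sketch of a null-tolerant comparison of that shape -- it is not the actual Hadoop method, and the names are invented for illustration:

    import java.util.Objects;

    // Hypothetical sketch of a null-safe label comparison; not AppSchedulingInfo's code.
    public class LabelCompareSketch {

        // Mirrors the shape implied by isRequestLabelChanged: true if the labels differ.
        static boolean isLabelChanged(String storedLabelExpression,
                                      String requestedLabelExpression) {
            // Objects.equals treats two nulls as equal and never dereferences either
            // argument, which is the guard an NPE at this point would be missing.
            return !Objects.equals(storedLabelExpression, requestedLabelExpression);
        }

        public static void main(String[] args) {
            System.out.println(isLabelChanged(null, null));         // false
            System.out.println(isLabelChanged(null, "labelX"));     // true
            System.out.println(isLabelChanged("labelX", "labelX")); // false
        }
    }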



Hadoop-Mapreduce-trunk - Build # 2474 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2474/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32987 lines...]
Running org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.636 sec - in org.apache.hadoop.mapred.TestTaskAttemptFinishingMonitor

Results :

Tests in error: 
  TestRMContainerAllocator.testReportedAppProgress:970 » NullPointer
  TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode:1605 » NullPointer
  TestRMContainerAllocator.testBlackListedNodes:1318 » NullPointer
  TestRMContainerAllocator.testReducerRampdownDiagnostics:450 » NullPointer
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer

Tests run: 339, Failures: 0, Errors: 9, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.076 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:40 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.923 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.014 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [08:40 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:00 min
[INFO] Finished at: 2015-10-14T07:25:16+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-1172
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:342)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:507)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)



Hadoop-Mapreduce-trunk - Build # 2473 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2473/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32987 lines...]
Running org.apache.hadoop.mapred.TestLocalContainerLauncher
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.224 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestRMContainerAllocator.testReportedAppProgress:970 » NullPointer
  TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode:1605 » NullPointer
  TestRMContainerAllocator.testBlackListedNodes:1318 » NullPointer
  TestRMContainerAllocator.testReducerRampdownDiagnostics:450 » NullPointer
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer

Tests run: 339, Failures: 0, Errors: 9, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  3.405 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:51 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 33.368 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.475 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:24 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 11:59 min
[INFO] Finished at: 2015-10-14T01:16:15+00:00
[INFO] Final Memory: 40M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9187
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)



Hadoop-Mapreduce-trunk - Build # 2472 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2472/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 33380 lines...]
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer
  TestRecovery.testMultipleCrashes:792 » YarnRuntime org.apache.hadoop.fs.Unsupp...
  TestRecovery.testRecoveryFailsUsingCustomOutputCommitter:572 » YarnRuntime cou...
  TestRecovery.testCrashOfMapsOnlyJob:327 » YarnRuntime could not cleanup test d...
  TestRecovery.testOutputRecovery:821 » YarnRuntime could not cleanup test dir
  TestRecovery.testOutputRecoveryMapsOnly:930 » YarnRuntime could not cleanup te...
  TestRecovery.testRecoverySuccessUsingCustomOutputCommitter:459 » YarnRuntime c...
  TestRecovery.testCrashed:130 » YarnRuntime could not cleanup test dir
  TestRecovery.testSpeculative:1164 » YarnRuntime could not cleanup test dir
  TestRecovery.testRecoveryWithoutShuffleSecret:1316 » YarnRuntime could not cle...
  TestRecovery.testRecoveryWithOldCommiter:1045 » YarnRuntime could not cleanup ...

Tests run: 339, Failures: 21, Errors: 19, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  2.942 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:36 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 29.307 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  4.859 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [07:58 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 10:13 min
[INFO] Finished at: 2015-10-13T23:55:50+00:00
[INFO] Final Memory: 39M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HDFS-9167
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any



###################################################################################
############################## FAILED TESTS (if any) ##############################
40 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testMultipleCrashes

Error Message:
org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.fs.UnsupportedFileSystemException: fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:472)
	at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.serviceInit(JobHistoryEventHandler.java:165)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:107)
	at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.serviceInit(MRAppMaster.java:482)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.serviceInit(MRApp.java:267)
	at org.apache.hadoop.service.AbstractService.init(AbstractService.java:163)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.submit(MRApp.java:295)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.submit(MRApp.java:280)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testMultipleCrashes(TestRecovery.java:792)
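
Unlike the allocator failures above, the TestRecovery errors in this build name their configuration cause directly: "fs.AbstractFileSystem.file.impl=null: No AbstractFileSystem configured for scheme: file", i.e. FileContext cannot resolve the local file scheme in the test JVM. The sketch below shows one hedged way to bind that key programmatically; the value org.apache.hadoop.fs.local.LocalFs is the usual default supplied by core-default.xml, so its absence here more likely points at a classpath or configuration-loading problem in this build than at the test code, and the class name should be treated as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;

    // Hedged sketch: explicitly registering the local AbstractFileSystem for "file".
    public class LocalFsBindingSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // The key comes straight from the error message; the class name is the
            // stock local implementation and is an assumption about this build.
            conf.set("fs.AbstractFileSystem.file.impl",
                     "org.apache.hadoop.fs.local.LocalFs");
            FileContext fc = FileContext.getFileContext(conf);
            System.out.println("file scheme resolved, cwd = " + fc.getWorkingDirectory());
        }
    }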


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryFailsUsingCustomOutputCommitter

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryFailsUsingCustomOutputCommitter(TestRecovery.java:572)
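
The remaining TestRecovery failures report "could not cleanup test dir" instead, but the frames show the same root: MRApp's constructor obtains a local FileContext (FileContext.getLocalFSFileContext, FileContext.java:409) to wipe its test directory, and that call already needs an AbstractFileSystem bound to the "file" scheme, so cleanup dies before the test body runs. A hedged sketch of that cleanup pattern, with a hypothetical path for illustration (not MRApp's actual code):

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    // Hedged sketch of the cleanup pattern implied by the MRApp.<init> frames above.
    public class TestDirCleanupSketch {
        public static void main(String[] args) {
            Path testDir = new Path("/tmp/mrapp-test-dir"); // hypothetical, for illustration
            try {
                // This is the step that fails in the traces: even obtaining the local
                // FileContext requires an AbstractFileSystem for the "file" scheme.
                FileContext fc = FileContext.getLocalFSFileContext();
                if (fc.util().exists(testDir)) {
                    fc.delete(testDir, true);
                }
            } catch (Exception e) {
                // MRApp surfaces this as YarnRuntimeException("could not cleanup test dir").
                throw new RuntimeException("could not cleanup test dir", e);
            }
        }
    }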


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashOfMapsOnlyJob

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashOfMapsOnlyJob(TestRecovery.java:327)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecovery

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecovery(TestRecovery.java:821)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecoveryMapsOnly

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testOutputRecoveryMapsOnly(TestRecovery.java:930)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoverySuccessUsingCustomOutputCommitter

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoverySuccessUsingCustomOutputCommitter(TestRecovery.java:459)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashed

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testCrashed(TestRecovery.java:130)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testSpeculative

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testSpeculative(TestRecovery.java:1164)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithoutShuffleSecret

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppNoShuffleSecret.<init>(TestRecovery.java:1960)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithoutShuffleSecret(TestRecovery.java:1316)


FAILED:  org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithOldCommiter

Error Message:
could not cleanup test dir

Stack Trace:
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: could not cleanup test dir
	at org.apache.hadoop.fs.AbstractFileSystem.createFileSystem(AbstractFileSystem.java:161)
	at org.apache.hadoop.fs.AbstractFileSystem.get(AbstractFileSystem.java:250)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:332)
	at org.apache.hadoop.fs.FileContext$2.run(FileContext.java:329)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1667)
	at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:329)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:446)
	at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:423)
	at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:409)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:239)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:208)
	at org.apache.hadoop.mapreduce.v2.app.MRApp.<init>(MRApp.java:189)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery$MRAppWithHistory.<init>(TestRecovery.java:1928)
	at org.apache.hadoop.mapreduce.v2.app.TestRecovery.testRecoveryWithOldCommiter(TestRecovery.java:1045)
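
All of the TestRecovery failures above fail the same way: MRApp's constructor cannot obtain a local FileContext to clean up its test directory, and the failure surfaces as a YarnRuntimeException carrying the message "could not cleanup test dir". The sketch below shows that cleanup pattern in minimal form, assuming a try/catch around a local-filesystem delete; it is an illustrative reconstruction, not MRApp's actual source, and the testDir argument is hypothetical.

    import java.io.File;

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

    public class TestDirCleanup {
      public static void cleanup(File testDir) {
        try {
          // Obtaining the local FileContext is the step that blows up in the traces above.
          FileContext fc = FileContext.getLocalFSFileContext();
          // Recursively remove anything left over from a previous run.
          fc.delete(new Path(testDir.getAbsolutePath()), true);
        } catch (Exception e) {
          // Any failure here is reported as "could not cleanup test dir".
          throw new YarnRuntimeException("could not cleanup test dir", e);
        }
      }
    }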


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersXML

Error Message:
expected:<application/xml> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/xml> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersXML(TestAMWebServicesJobs.java:688)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsXML

Error Message:
expected:<application/xml> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/xml> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsXML(TestAMWebServicesJobs.java:841)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsXML

Error Message:
expected:<application/xml> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/xml> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsXML(TestAMWebServicesJobs.java:179)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsDefault

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsDefault(TestAMWebServicesJobs.java:162)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsSlash

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobsSlash(TestAMWebServicesJobs.java:146)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobId

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobId(TestAMWebServicesJobs.java:204)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobs

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobs(TestAMWebServicesJobs.java:129)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdXML

Error Message:
expected:<application/xml> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/xml> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdXML(TestAMWebServicesJobs.java:401)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidXML

Error Message:
expected:<Not Found> but was:<Internal Server Error>

Stack Trace:
java.lang.AssertionError: expected:<Not Found> but was:<Internal Server Error>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidXML(TestAMWebServicesJobs.java:332)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidBogus

Error Message:
expected:<Not Found> but was:<Internal Server Error>

Stack Trace:
java.lang.AssertionError: expected:<Not Found> but was:<Internal Server Error>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidBogus(TestAMWebServicesJobs.java:371)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsSlash

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsSlash(TestAMWebServicesJobs.java:805)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdSlash

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdSlash(TestAMWebServicesJobs.java:223)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersSlash

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersSlash(TestAMWebServicesJobs.java:653)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdDefault

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdDefault(TestAMWebServicesJobs.java:240)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCounters

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCounters(TestAMWebServicesJobs.java:635)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalid

Error Message:
expected:<Not Found> but was:<Internal Server Error>

Stack Trace:
java.lang.AssertionError: expected:<Not Found> but was:<Internal Server Error>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalid(TestAMWebServicesJobs.java:286)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttempts

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttempts(TestAMWebServicesJobs.java:787)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidDefault

Error Message:
expected:<Not Found> but was:<Internal Server Error>

Stack Trace:
java.lang.AssertionError: expected:<Not Found> but was:<Internal Server Error>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdInvalidDefault(TestAMWebServicesJobs.java:309)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersDefault

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobCountersDefault(TestAMWebServicesJobs.java:670)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdNonExist

Error Message:
expected:<Not Found> but was:<Internal Server Error>

Stack Trace:
java.lang.AssertionError: expected:<Not Found> but was:<Internal Server Error>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobIdNonExist(TestAMWebServicesJobs.java:259)


FAILED:  org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsDefault

Error Message:
expected:<application/json> but was:<text/html; charset=ISO-8859-1>

Stack Trace:
java.lang.AssertionError: expected:<application/json> but was:<text/html; charset=ISO-8859-1>
	at org.junit.Assert.fail(Assert.java:88)
	at org.junit.Assert.failNotEquals(Assert.java:743)
	at org.junit.Assert.assertEquals(Assert.java:118)
	at org.junit.Assert.assertEquals(Assert.java:144)
	at org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs.testJobAttemptsDefault(TestAMWebServicesJobs.java:823)
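
The TestAMWebServicesJobs failures above all describe the same symptom: the AM web services endpoints are expected to answer with application/json or application/xml (or a plain Not Found for bogus job ids), but the server instead returns a text/html error page, i.e. the request dies with an internal server error before a JSON/XML body is produced. The check being violated is essentially a content-type assertion on the HTTP response; the sketch below illustrates that check with a plain HttpURLConnection rather than the Jersey test client the real tests use, and the endpoint URL is hypothetical.

    import static org.junit.Assert.assertEquals;

    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.junit.Test;

    public class ContentTypeCheck {
      @Test
      public void jobsEndpointReturnsJson() throws Exception {
        // Hypothetical AM web services address; the real tests run against an embedded container.
        URL url = new URL("http://localhost:8088/ws/v1/mapreduce/jobs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");

        // A "text/html; charset=ISO-8859-1" body here means the container sent an
        // error page (HTTP 500) instead of the expected JSON document.
        assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());

        // Compare only the media type so an appended charset does not skew the check.
        String contentType = conn.getContentType();
        assertEquals("application/json",
            contentType == null ? null : contentType.split(";")[0].trim());
      }
    }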


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)
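
The nine TestRMContainerAllocator errors above are a single repeated NullPointerException, thrown from AppSchedulingInfo.isRequestLabelChanged while the FifoScheduler processes the AM's resource requests, which points at a node-label expression being compared without a null guard. A null-safe comparison of that shape would look like the hypothetical helper below; it illustrates the missing guard and is not the actual AppSchedulingInfo code or the eventual fix.

    import java.util.Objects;

    public final class LabelCompare {
      private LabelCompare() {
      }

      // Treat a missing (null) node-label expression like the empty/default label so
      // that requests without labels cannot trip a NullPointerException here.
      static boolean labelChanged(String oldLabelExpression, String newLabelExpression) {
        String oldLabel = oldLabelExpression == null ? "" : oldLabelExpression;
        String newLabel = newLabelExpression == null ? "" : newLabelExpression;
        return !Objects.equals(oldLabel, newLabel);
      }
    }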



Hadoop-Mapreduce-trunk - Build # 2471 - Still Failing

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See https://builds.apache.org/job/Hadoop-Mapreduce-trunk/2471/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 32982 lines...]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.128 sec - in org.apache.hadoop.mapred.TestLocalContainerLauncher

Results :

Tests in error: 
  TestRMContainerAllocator.testReportedAppProgress:970 » NullPointer
  TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode:1605 » NullPointer
  TestRMContainerAllocator.testBlackListedNodes:1318 » NullPointer
  TestRMContainerAllocator.testReducerRampdownDiagnostics:450 » NullPointer
  TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps:1118 » NullPointer
  TestRMContainerAllocator.testUpdatedNodes:1213 » NullPointer
  TestRMContainerAllocator.testSimple:231 » NullPointer
  TestRMContainerAllocator.testIgnoreBlacklisting:1393->getContainerOnHost:1511 » NullPointer
  TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart:2509 » NullPointer

Tests run: 339, Failures: 0, Errors: 9, Skipped: 0

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop MapReduce Client .................... SUCCESS [  4.012 s]
[INFO] Apache Hadoop MapReduce Core ...................... SUCCESS [01:57 min]
[INFO] Apache Hadoop MapReduce Common .................... SUCCESS [ 34.738 s]
[INFO] Apache Hadoop MapReduce Shuffle ................... SUCCESS [  5.274 s]
[INFO] Apache Hadoop MapReduce App ....................... FAILURE [09:56 min]
[INFO] Apache Hadoop MapReduce HistoryServer ............. SKIPPED
[INFO] Apache Hadoop MapReduce JobClient ................. SKIPPED
[INFO] Apache Hadoop MapReduce HistoryServer Plugins ..... SKIPPED
[INFO] Apache Hadoop MapReduce NativeTask ................ SKIPPED
[INFO] Apache Hadoop MapReduce Examples .................. SKIPPED
[INFO] Apache Hadoop MapReduce ........................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:40 min
[INFO] Finished at: 2015-10-13T23:06:47+00:00
[INFO] Final Memory: 42M/825M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-mapreduce-client-app: There are test failures.
[ERROR] 
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Mapreduce-trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-mapreduce-client-app
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Archiving artifacts
Recording test results
Updating HADOOP-10775
Updating HDFS-8779
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
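
For local reproduction, the Maven hint above can be followed literally: with the original goals substituted for <goals>, a command along the lines of mvn test -rf :hadoop-mapreduce-client-app resumes the reactor at the failing module, and adding -Dtest=TestRMContainerAllocator asks Surefire to run only the failing class.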



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgress(TestRMContainerAllocator.java:970)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodesWithSchedulingToThatNode(TestRMContainerAllocator.java:1605)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testBlackListedNodes(TestRMContainerAllocator.java:1318)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReducerRampdownDiagnostics(TestRMContainerAllocator.java:450)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testReportedAppProgressWithOnlyMaps(TestRMContainerAllocator.java:1118)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testUpdatedNodes(TestRMContainerAllocator.java:1213)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testSimple(TestRMContainerAllocator.java:231)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.getContainerOnHost(TestRMContainerAllocator.java:1511)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testIgnoreBlacklisting(TestRMContainerAllocator.java:1393)


FAILED:  org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart

Error Message:
null

Stack Trace:
java.lang.NullPointerException: null
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.isRequestLabelChanged(AppSchedulingInfo.java:420)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.updateResourceRequests(AppSchedulingInfo.java:341)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.updateResourceRequests(SchedulerApplicationAttempt.java:300)
	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.allocate(FifoScheduler.java:350)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyFifoScheduler.allocate(TestRMContainerAllocator.java:1719)
	at org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.allocate(ApplicationMasterService.java:506)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor.makeRemoteRequest(RMContainerRequestor.java:204)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.makeRemoteRequest(TestRMContainerAllocator.java:2060)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.getResources(RMContainerAllocator.java:724)
	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:268)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator$MyContainerAllocator.schedule(TestRMContainerAllocator.java:2023)
	at org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.testRMContainerAllocatorResendsRequestsOnRMRestart(TestRMContainerAllocator.java:2509)