You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@pig.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2012/03/21 11:37:34 UTC

Build failed in Jenkins: Pig-trunk #1212

See <https://builds.apache.org/job/Pig-trunk/1212/changes>

Changes:

[daijy] PIG-2608: Typo in PigStorage documentation for source tagging

[daijy] PIG-2505: missing test_no_exception

------------------------------------------
[...truncated 51912 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/21 10:35:29 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/03/21 10:35:29 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/21 10:35:29 INFO ipc.Server: Stopping server on 58134
    [junit] 12/03/21 10:35:29 INFO ipc.Server: Stopping IPC Server listener on 58134
    [junit] 12/03/21 10:35:29 INFO ipc.Server: IPC Server handler 2 on 58134: exiting
    [junit] 12/03/21 10:35:29 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/21 10:35:29 INFO ipc.Server: IPC Server handler 1 on 58134: exiting
    [junit] 12/03/21 10:35:29 INFO ipc.Server: IPC Server handler 0 on 58134: exiting
    [junit] 12/03/21 10:35:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/21 10:35:29 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:33760, storageID=DS-1101057205-67.195.138.20-33760-1332325586734, infoPort=52166, ipcPort=58134):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/21 10:35:29 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/21 10:35:29 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:33760, storageID=DS-1101057205-67.195.138.20-33760-1332325586734, infoPort=52166, ipcPort=58134):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/03/21 10:35:29 INFO ipc.Server: Stopping server on 58134
    [junit] 12/03/21 10:35:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:29 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/21 10:35:29 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/21 10:35:29 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/21 10:35:30 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:60141 to delete  blk_-7192402847186639452_1095 blk_-1109841277642312362_1101
    [junit] 12/03/21 10:35:30 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:38982 to delete  blk_-422404991587375213_1102 blk_-1109841277642312362_1101 blk_-6062460517911994428_1102
    [junit] 12/03/21 10:35:30 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:30 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1816741885
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1816741885
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/21 10:35:30 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/03/21 10:35:30 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/21 10:35:31 INFO ipc.Server: Stopping server on 60494
    [junit] 12/03/21 10:35:31 INFO ipc.Server: IPC Server handler 0 on 60494: exiting
    [junit] 12/03/21 10:35:31 INFO ipc.Server: IPC Server handler 1 on 60494: exiting
    [junit] 12/03/21 10:35:31 INFO ipc.Server: IPC Server handler 2 on 60494: exiting
    [junit] 12/03/21 10:35:31 INFO ipc.Server: Stopping IPC Server listener on 60494
    [junit] 12/03/21 10:35:31 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:31 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/21 10:35:31 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/21 10:35:31 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60141, storageID=DS-1468001538-67.195.138.20-60141-1332325586376, infoPort=37555, ipcPort=60494):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/21 10:35:31 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/21 10:35:31 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60141, storageID=DS-1468001538-67.195.138.20-60141-1332325586376, infoPort=37555, ipcPort=60494):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/03/21 10:35:32 INFO ipc.Server: Stopping server on 60494
    [junit] 12/03/21 10:35:32 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:32 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/21 10:35:32 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/21 10:35:32 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-770761283
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-770761283
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/21 10:35:32 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/21 10:35:32 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/21 10:35:32 INFO ipc.Server: Stopping server on 37132
    [junit] 12/03/21 10:35:32 INFO ipc.Server: IPC Server handler 1 on 37132: exiting
    [junit] 12/03/21 10:35:32 INFO ipc.Server: IPC Server handler 0 on 37132: exiting
    [junit] 12/03/21 10:35:32 INFO ipc.Server: Stopping IPC Server listener on 37132
    [junit] 12/03/21 10:35:32 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/21 10:35:32 INFO ipc.Server: IPC Server handler 2 on 37132: exiting
    [junit] 12/03/21 10:35:32 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:40590, storageID=DS-1567455724-67.195.138.20-40590-1332325586005, infoPort=52252, ipcPort=37132):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/21 10:35:32 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:40590, storageID=DS-1567455724-67.195.138.20-40590-1332325586005, infoPort=52252, ipcPort=37132):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/03/21 10:35:32 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/21 10:35:32 INFO ipc.Server: Stopping server on 37132
    [junit] 12/03/21 10:35:32 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:32 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:32 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/21 10:35:32 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/21 10:35:32 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/21 10:35:33 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/21 10:35:33 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1993685444
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1993685444
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/21 10:35:33 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/21 10:35:33 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/21 10:35:33 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/03/21 10:35:33 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 12Number of transactions batched in Syncs: 157 Number of syncs: 348 SyncTimes(ms): 4055 247 
    [junit] 12/03/21 10:35:33 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/21 10:35:33 INFO ipc.Server: Stopping server on 33861
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 1 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 0 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 2 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 3 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 4 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 5 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 6 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 7 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 8 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: IPC Server handler 9 on 33861: exiting
    [junit] 12/03/21 10:35:33 INFO ipc.Server: Stopping IPC Server listener on 33861
    [junit] 12/03/21 10:35:33 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/21 10:35:33 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 540.696 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/03/21 10:35:34 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/03/21 10:35:34 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/21 10:35:34 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/03/21 10:35:34 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/03/21 10:35:34 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/21 10:35:34 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/03/21 10:35:34 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.107 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1816288462

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:781: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:836: Tests failed!

Total time: 24 minutes 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Jenkins build is back to normal : Pig-trunk #1218

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1218/changes>


Re: Build failed in Jenkins: Pig-trunk #1217

Posted by Daniel Dai <da...@hortonworks.com>.
builds@apache.org might help. They fix it once in a while, but I am
tired of chasing them all the time.

Daniel

On Wed, Mar 28, 2012 at 9:26 AM, Jonathan Coveney <jc...@gmail.com> wrote:
> This failure is beginning to get annoying... what steps do we take (and I'm
> willing to take them) in order to rectify this? Does Apache need to renew
> the license? Do we need to turn off Clover in the CI builds?
>
> 2012/3/28 Apache Jenkins Server <je...@builds.apache.org>
>
>> See <https://builds.apache.org/job/Pig-trunk/1217/changes>
>>
>> Changes:
>>
>> [daijy] Fix several e2e tests
>>
>> [jcoveney] [trunk] PIG-2619: HBaseStorage constructs a Scan with
>> cacheBlocks = false
>>
>> ------------------------------------------
>> [...truncated 6506 lines...]
>>  [findbugs]   org.mozilla.javascript.NativeJavaObject
>>  [findbugs]   jline.ConsoleReaderInputStream
>>  [findbugs]   org.apache.log4j.PropertyConfigurator
>>  [findbugs]   org.apache.hadoop.mapred.TaskID
>>  [findbugs]   org.apache.commons.cli.CommandLine
>>  [findbugs]   org.python.core.Py
>>  [findbugs]   org.apache.hadoop.io.BooleanWritable$Comparator
>>  [findbugs]   org.apache.hadoop.io.LongWritable
>>  [findbugs]   org.antlr.runtime.BitSet
>>  [findbugs]   org.apache.hadoop.mapred.jobcontrol.Job
>>  [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter$CompareOp
>>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader
>>  [findbugs]   org.mozilla.javascript.NativeFunction
>>  [findbugs]   org.apache.hadoop.mapreduce.Counter
>>  [findbugs]   org.codehaus.jackson.JsonEncoding
>>  [findbugs]   org.codehaus.jackson.JsonParseException
>>  [findbugs]   org.python.core.PyCode
>>  [findbugs]   com.jcraft.jsch.HostKey
>>  [findbugs]   org.apache.hadoop.hbase.filter.Filter
>>  [findbugs]   org.apache.commons.logging.Log
>>  [findbugs]   com.google.common.util.concurrent.ListenableFuture
>>  [findbugs]   org.apache.hadoop.util.RunJar
>>  [findbugs]   org.apache.hadoop.mapred.Counters$Group
>>  [findbugs]   com.jcraft.jsch.ChannelExec
>>  [findbugs]   org.apache.hadoop.hbase.util.Base64
>>  [findbugs]   org.antlr.runtime.TokenStream
>>  [findbugs]   org.apache.hadoop.io.IOUtils
>>  [findbugs]   com.google.common.util.concurrent.CheckedFuture
>>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
>>  [findbugs]   org.apache.hadoop.fs.FSDataInputStream
>>  [findbugs]   org.python.core.PyObject
>>  [findbugs]   jline.History
>>  [findbugs]   org.apache.hadoop.io.BooleanWritable
>>  [findbugs]   org.apache.log4j.Logger
>>  [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
>>  [findbugs]   org.antlr.runtime.IntStream
>>  [findbugs]   org.apache.hadoop.util.ReflectionUtils
>>  [findbugs]   org.apache.hadoop.fs.ContentSummary
>>  [findbugs]   org.python.core.PyTuple
>>  [findbugs]   org.apache.hadoop.conf.Configuration
>>  [findbugs]   com.google.common.base.Joiner
>>  [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
>>  [findbugs]   org.apache.hadoop.mapred.Counters$Counter
>>  [findbugs]   com.jcraft.jsch.Channel
>>  [findbugs]   org.apache.hadoop.mapred.JobPriority
>>  [findbugs]   org.apache.commons.cli.Options
>>  [findbugs]   org.apache.hadoop.mapred.JobID
>>  [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
>>  [findbugs]   org.python.core.PyFrame
>>  [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
>>  [findbugs]   org.apache.hadoop.util.VersionInfo
>>  [findbugs]   org.python.core.PyString
>>  [findbugs]   org.apache.hadoop.io.Text$Comparator
>>  [findbugs]   org.antlr.runtime.MismatchedSetException
>>  [findbugs]   org.apache.hadoop.io.BytesWritable
>>  [findbugs]   org.apache.hadoop.fs.FsShell
>>  [findbugs]   org.mozilla.javascript.ImporterTopLevel
>>  [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
>>  [findbugs]   org.apache.hadoop.mapred.TaskReport
>>  [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
>>  [findbugs]   org.apache.commons.cli.HelpFormatter
>>  [findbugs]   org.mozilla.javascript.NativeObject
>>  [findbugs]   org.apache.hadoop.hbase.HConstants
>>  [findbugs]   org.apache.hadoop.io.serializer.Deserializer
>>  [findbugs]   org.antlr.runtime.FailedPredicateException
>>  [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
>>  [findbugs]   org.apache.hadoop.fs.FileStatus
>>  [findbugs]   org.apache.hadoop.hbase.client.Result
>>  [findbugs]   org.apache.hadoop.mapreduce.JobContext
>>  [findbugs]   org.codehaus.jackson.JsonGenerator
>>  [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
>>  [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
>>  [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
>>  [findbugs]   org.codehaus.jackson.map.util.LRUMap
>>  [findbugs]   org.apache.hadoop.hbase.util.Bytes
>>  [findbugs]   org.antlr.runtime.MismatchedTokenException
>>  [findbugs]   org.codehaus.jackson.JsonParser
>>  [findbugs]   com.jcraft.jsch.UserInfo
>>  [findbugs]   org.python.core.PyException
>>  [findbugs]   org.apache.commons.cli.ParseException
>>  [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
>>  [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
>>  [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
>>  [findbugs]   org.apache.log4j.Level
>>  [findbugs]   org.apache.hadoop.hbase.client.Scan
>>  [findbugs]   org.apache.hadoop.mapreduce.Job
>>  [findbugs]   com.google.common.util.concurrent.Futures
>>  [findbugs]   org.apache.commons.logging.LogFactory
>>  [findbugs]   org.apache.commons.codec.binary.Base64
>>  [findbugs]   org.codehaus.jackson.map.ObjectMapper
>>  [findbugs]   org.apache.hadoop.fs.FileSystem
>>  [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
>>  [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
>>  [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
>>  [findbugs]   org.antlr.runtime.tree.TreeAdaptor
>>  [findbugs]   org.apache.hadoop.mapred.RunningJob
>>  [findbugs]   org.antlr.runtime.CommonTokenStream
>>  [findbugs]   org.apache.hadoop.io.DataInputBuffer
>>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile
>>  [findbugs]   org.apache.commons.cli.GnuParser
>>  [findbugs]   org.mozilla.javascript.Context
>>  [findbugs]   org.apache.hadoop.io.FloatWritable
>>  [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
>>  [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
>>  [findbugs]   org.codehaus.jackson.JsonGenerationException
>>  [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
>>  [findbugs]   org.apache.hadoop.io.compress.GzipCodec
>>  [findbugs]   org.apache.hadoop.mapred.jobcontrol.JobControl
>>  [findbugs]   org.antlr.runtime.BaseRecognizer
>>  [findbugs]   org.apache.hadoop.fs.FileUtil
>>  [findbugs]   org.apache.hadoop.fs.Path
>>  [findbugs]   org.apache.hadoop.hbase.client.Put
>>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
>>  [findbugs]   jline.ConsoleReader
>>  [findbugs]   com.google.common.collect.Lists
>>  [findbugs]   org.apache.hadoop.mapreduce.MapContext
>>  [findbugs]   org.python.core.PyJavaPackage
>>  [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
>>  [findbugs]   org.python.core.PyStringMap
>>  [findbugs]   org.apache.hadoop.mapreduce.TaskID
>>  [findbugs]   org.apache.hadoop.hbase.client.HTable
>>  [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
>>  [findbugs]   org.apache.zookeeper.ZooKeeper
>>  [findbugs]   org.codehaus.jackson.map.JsonMappingException
>>  [findbugs]   org.python.core.PyFunction
>>  [findbugs]   org.antlr.runtime.TokenSource
>>  [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
>>  [findbugs]   com.jcraft.jsch.JSchException
>>  [findbugs]   org.python.util.PythonInterpreter
>>  [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
>>  [findbugs]   org.python.core.PyInteger
>>  [findbugs]   org.apache.hadoop.mapred.JobConf
>>  [findbugs]   org.apache.hadoop.util.bloom.Key
>>  [findbugs]   org.apache.hadoop.io.Text
>>  [findbugs]   org.antlr.runtime.NoViableAltException
>>  [findbugs]   org.apache.hadoop.util.GenericOptionsParser
>>  [findbugs]   org.apache.hadoop.mapreduce.JobID
>>  [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
>>  [findbugs]   org.apache.hadoop.filecache.DistributedCache
>>  [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
>>  [findbugs]   org.python.core.PyList
>>  [findbugs]   org.antlr.runtime.tree.TreeNodeStream
>>  [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
>>  [findbugs]   dk.brics.automaton.RegExp
>>  [findbugs]   org.mozilla.javascript.Scriptable
>>  [findbugs]   org.mozilla.javascript.EcmaError
>>  [findbugs]   org.apache.hadoop.io.serializer.Serializer
>>  [findbugs]   org.apache.hadoop.util.bloom.Filter
>>  [findbugs]   org.python.core.PyNone
>>  [findbugs]   org.mozilla.javascript.Function
>>  [findbugs]   org.python.core.PySystemState
>>  [findbugs]   org.antlr.runtime.RecognizerSharedState
>>  [findbugs]   org.codehaus.jackson.JsonFactory
>>  [findbugs]   org.antlr.runtime.EarlyExitException
>>  [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
>>  [findbugs]   org.apache.hadoop.util.LineReader
>>  [findbugs] Warnings generated: 25
>>  [findbugs] Missing classes: 233
>>  [findbugs] Calculating exit code...
>>  [findbugs] Setting 'missing class' flag (2)
>>  [findbugs] Setting 'bugs found' flag (1)
>>  [findbugs] Exit code set to: 3
>>  [findbugs] Java Result: 3
>>  [findbugs] Classes needed for analysis were missing
>>  [findbugs] Output saved to <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml
>> >
>>     [xslt] Processing <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
>> to <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html
>> >
>>     [xslt] Loading stylesheet
>> /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
>>
>> BUILD SUCCESSFUL
>> Total time: 14 minutes 2 seconds
>>
>>
>> ======================================================================
>> ======================================================================
>> STORE: saving artifacts
>> ======================================================================
>> ======================================================================
>>
>>
>>
>>
>> ======================================================================
>> ======================================================================
>> CLEAN: cleaning workspace
>> ======================================================================
>> ======================================================================
>>
>>
>> Buildfile: build.xml
>>
>> clean:
>>   [delete] Deleting directory <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
>>   [delete] Deleting directory <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
>>   [delete] Deleting directory <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
>>   [delete] Deleting directory <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser
>> >
>>   [delete] Deleting: <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
>>   [delete] Deleting: <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>
>>
>> clean:
>>
>> clean:
>>
>> BUILD SUCCESSFUL
>> Total time: 0 seconds
>>
>>
>> ======================================================================
>> ======================================================================
>> ANALYSIS: ant -Drun.clover=true
>> -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit
>> generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes
>> -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME
>> -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME
>> -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
>> ======================================================================
>> ======================================================================
>>
>>
>> Buildfile: build.xml
>>
>> clover.setup:
>>    [mkdir] Created dir: <
>> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
>> [clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
>> [clover-setup] Loaded from:
>> /home/jenkins/tools/clover/latest/lib/clover.jar
>>
>> BUILD FAILED
>> java.lang.RuntimeException: Clover upgrades for your license ended
>> December 14 2010, and this version of Clover was built May 31 2011. Please
>> visit http://www.atlassian.com/clover/renew for information on upgrading
>> your license.
>>        at
>> com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
>>        at
>> com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
>>        at
>> com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
>>        at
>> org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
>>        at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
>>        at
>> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>>        at java.lang.reflect.Method.invoke(Method.java:597)
>>        at
>> org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
>>        at org.apache.tools.ant.Task.perform(Task.java:348)
>>        at org.apache.tools.ant.Target.execute(Target.java:357)
>>        at org.apache.tools.ant.Target.performTasks(Target.java:385)
>>        at
>> org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
>>        at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
>>        at
>> org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
>>        at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
>>        at org.apache.tools.ant.Main.runBuild(Main.java:758)
>>        at org.apache.tools.ant.Main.startAnt(Main.java:217)
>>        at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
>>        at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)
>>
>> Total time: 1 second
>> Build step 'Execute shell' marked build as failure
>> [FINDBUGS] Skipping publisher since build result is FAILURE
>> Recording test results
>> Publishing Javadoc
>> Archiving artifacts
>> Recording fingerprints
>>

Re: Build failed in Jenkins: Pig-trunk #1217

Posted by Jonathan Coveney <jc...@gmail.com>.
This failure is beginning to get annoying... what steps do we need to take
(and I'm willing to take them) in order to rectify this? Does Apache need to
renew the license? Do we need to turn off Clover in the CI builds?

2012/3/28 Apache Jenkins Server <je...@builds.apache.org>

> See <https://builds.apache.org/job/Pig-trunk/1217/changes>
>
> Changes:
>
> [daijy] Fix several e2e tests
>
> [jcoveney] [trunk] PIG-2619: HBaseStorage constructs a Scan with
> cacheBlocks = false
>
> ------------------------------------------
> [...truncated 6506 lines...]
>  [findbugs]   org.mozilla.javascript.NativeJavaObject
>  [findbugs]   jline.ConsoleReaderInputStream
>  [findbugs]   org.apache.log4j.PropertyConfigurator
>  [findbugs]   org.apache.hadoop.mapred.TaskID
>  [findbugs]   org.apache.commons.cli.CommandLine
>  [findbugs]   org.python.core.Py
>  [findbugs]   org.apache.hadoop.io.BooleanWritable$Comparator
>  [findbugs]   org.apache.hadoop.io.LongWritable
>  [findbugs]   org.antlr.runtime.BitSet
>  [findbugs]   org.apache.hadoop.mapred.jobcontrol.Job
>  [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter$CompareOp
>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader
>  [findbugs]   org.mozilla.javascript.NativeFunction
>  [findbugs]   org.apache.hadoop.mapreduce.Counter
>  [findbugs]   org.codehaus.jackson.JsonEncoding
>  [findbugs]   org.codehaus.jackson.JsonParseException
>  [findbugs]   org.python.core.PyCode
>  [findbugs]   com.jcraft.jsch.HostKey
>  [findbugs]   org.apache.hadoop.hbase.filter.Filter
>  [findbugs]   org.apache.commons.logging.Log
>  [findbugs]   com.google.common.util.concurrent.ListenableFuture
>  [findbugs]   org.apache.hadoop.util.RunJar
>  [findbugs]   org.apache.hadoop.mapred.Counters$Group
>  [findbugs]   com.jcraft.jsch.ChannelExec
>  [findbugs]   org.apache.hadoop.hbase.util.Base64
>  [findbugs]   org.antlr.runtime.TokenStream
>  [findbugs]   org.apache.hadoop.io.IOUtils
>  [findbugs]   com.google.common.util.concurrent.CheckedFuture
>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
>  [findbugs]   org.apache.hadoop.fs.FSDataInputStream
>  [findbugs]   org.python.core.PyObject
>  [findbugs]   jline.History
>  [findbugs]   org.apache.hadoop.io.BooleanWritable
>  [findbugs]   org.apache.log4j.Logger
>  [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
>  [findbugs]   org.antlr.runtime.IntStream
>  [findbugs]   org.apache.hadoop.util.ReflectionUtils
>  [findbugs]   org.apache.hadoop.fs.ContentSummary
>  [findbugs]   org.python.core.PyTuple
>  [findbugs]   org.apache.hadoop.conf.Configuration
>  [findbugs]   com.google.common.base.Joiner
>  [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
>  [findbugs]   org.apache.hadoop.mapred.Counters$Counter
>  [findbugs]   com.jcraft.jsch.Channel
>  [findbugs]   org.apache.hadoop.mapred.JobPriority
>  [findbugs]   org.apache.commons.cli.Options
>  [findbugs]   org.apache.hadoop.mapred.JobID
>  [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
>  [findbugs]   org.python.core.PyFrame
>  [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
>  [findbugs]   org.apache.hadoop.util.VersionInfo
>  [findbugs]   org.python.core.PyString
>  [findbugs]   org.apache.hadoop.io.Text$Comparator
>  [findbugs]   org.antlr.runtime.MismatchedSetException
>  [findbugs]   org.apache.hadoop.io.BytesWritable
>  [findbugs]   org.apache.hadoop.fs.FsShell
>  [findbugs]   org.mozilla.javascript.ImporterTopLevel
>  [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
>  [findbugs]   org.apache.hadoop.mapred.TaskReport
>  [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
>  [findbugs]   org.apache.commons.cli.HelpFormatter
>  [findbugs]   org.mozilla.javascript.NativeObject
>  [findbugs]   org.apache.hadoop.hbase.HConstants
>  [findbugs]   org.apache.hadoop.io.serializer.Deserializer
>  [findbugs]   org.antlr.runtime.FailedPredicateException
>  [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
>  [findbugs]   org.apache.hadoop.fs.FileStatus
>  [findbugs]   org.apache.hadoop.hbase.client.Result
>  [findbugs]   org.apache.hadoop.mapreduce.JobContext
>  [findbugs]   org.codehaus.jackson.JsonGenerator
>  [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
>  [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
>  [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
>  [findbugs]   org.codehaus.jackson.map.util.LRUMap
>  [findbugs]   org.apache.hadoop.hbase.util.Bytes
>  [findbugs]   org.antlr.runtime.MismatchedTokenException
>  [findbugs]   org.codehaus.jackson.JsonParser
>  [findbugs]   com.jcraft.jsch.UserInfo
>  [findbugs]   org.python.core.PyException
>  [findbugs]   org.apache.commons.cli.ParseException
>  [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
>  [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
>  [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
>  [findbugs]   org.apache.log4j.Level
>  [findbugs]   org.apache.hadoop.hbase.client.Scan
>  [findbugs]   org.apache.hadoop.mapreduce.Job
>  [findbugs]   com.google.common.util.concurrent.Futures
>  [findbugs]   org.apache.commons.logging.LogFactory
>  [findbugs]   org.apache.commons.codec.binary.Base64
>  [findbugs]   org.codehaus.jackson.map.ObjectMapper
>  [findbugs]   org.apache.hadoop.fs.FileSystem
>  [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
>  [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
>  [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
>  [findbugs]   org.antlr.runtime.tree.TreeAdaptor
>  [findbugs]   org.apache.hadoop.mapred.RunningJob
>  [findbugs]   org.antlr.runtime.CommonTokenStream
>  [findbugs]   org.apache.hadoop.io.DataInputBuffer
>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile
>  [findbugs]   org.apache.commons.cli.GnuParser
>  [findbugs]   org.mozilla.javascript.Context
>  [findbugs]   org.apache.hadoop.io.FloatWritable
>  [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
>  [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
>  [findbugs]   org.codehaus.jackson.JsonGenerationException
>  [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
>  [findbugs]   org.apache.hadoop.io.compress.GzipCodec
>  [findbugs]   org.apache.hadoop.mapred.jobcontrol.JobControl
>  [findbugs]   org.antlr.runtime.BaseRecognizer
>  [findbugs]   org.apache.hadoop.fs.FileUtil
>  [findbugs]   org.apache.hadoop.fs.Path
>  [findbugs]   org.apache.hadoop.hbase.client.Put
>  [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
>  [findbugs]   jline.ConsoleReader
>  [findbugs]   com.google.common.collect.Lists
>  [findbugs]   org.apache.hadoop.mapreduce.MapContext
>  [findbugs]   org.python.core.PyJavaPackage
>  [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
>  [findbugs]   org.python.core.PyStringMap
>  [findbugs]   org.apache.hadoop.mapreduce.TaskID
>  [findbugs]   org.apache.hadoop.hbase.client.HTable
>  [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
>  [findbugs]   org.apache.zookeeper.ZooKeeper
>  [findbugs]   org.codehaus.jackson.map.JsonMappingException
>  [findbugs]   org.python.core.PyFunction
>  [findbugs]   org.antlr.runtime.TokenSource
>  [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
>  [findbugs]   com.jcraft.jsch.JSchException
>  [findbugs]   org.python.util.PythonInterpreter
>  [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
>  [findbugs]   org.python.core.PyInteger
>  [findbugs]   org.apache.hadoop.mapred.JobConf
>  [findbugs]   org.apache.hadoop.util.bloom.Key
>  [findbugs]   org.apache.hadoop.io.Text
>  [findbugs]   org.antlr.runtime.NoViableAltException
>  [findbugs]   org.apache.hadoop.util.GenericOptionsParser
>  [findbugs]   org.apache.hadoop.mapreduce.JobID
>  [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
>  [findbugs]   org.apache.hadoop.filecache.DistributedCache
>  [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
>  [findbugs]   org.python.core.PyList
>  [findbugs]   org.antlr.runtime.tree.TreeNodeStream
>  [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
>  [findbugs]   dk.brics.automaton.RegExp
>  [findbugs]   org.mozilla.javascript.Scriptable
>  [findbugs]   org.mozilla.javascript.EcmaError
>  [findbugs]   org.apache.hadoop.io.serializer.Serializer
>  [findbugs]   org.apache.hadoop.util.bloom.Filter
>  [findbugs]   org.python.core.PyNone
>  [findbugs]   org.mozilla.javascript.Function
>  [findbugs]   org.python.core.PySystemState
>  [findbugs]   org.antlr.runtime.RecognizerSharedState
>  [findbugs]   org.codehaus.jackson.JsonFactory
>  [findbugs]   org.antlr.runtime.EarlyExitException
>  [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
>  [findbugs]   org.apache.hadoop.util.LineReader
>  [findbugs] Warnings generated: 25
>  [findbugs] Missing classes: 233
>  [findbugs] Calculating exit code...
>  [findbugs] Setting 'missing class' flag (2)
>  [findbugs] Setting 'bugs found' flag (1)
>  [findbugs] Exit code set to: 3
>  [findbugs] Java Result: 3
>  [findbugs] Classes needed for analysis were missing
>  [findbugs] Output saved to <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml
> >
>     [xslt] Processing <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
> to <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html
> >
>     [xslt] Loading stylesheet
> /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
>
> BUILD SUCCESSFUL
> Total time: 14 minutes 2 seconds
>
>
> ======================================================================
> ======================================================================
> STORE: saving artifacts
> ======================================================================
> ======================================================================
>
>
>
>
> ======================================================================
> ======================================================================
> CLEAN: cleaning workspace
> ======================================================================
> ======================================================================
>
>
> Buildfile: build.xml
>
> clean:
>   [delete] Deleting directory <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
>   [delete] Deleting directory <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
>   [delete] Deleting directory <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
>   [delete] Deleting directory <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser
> >
>   [delete] Deleting: <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
>   [delete] Deleting: <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>
>
> clean:
>
> clean:
>
> BUILD SUCCESSFUL
> Total time: 0 seconds
>
>
> ======================================================================
> ======================================================================
> ANALYSIS: ant -Drun.clover=true
> -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit
> generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes
> -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME
> -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME
> -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
> ======================================================================
> ======================================================================
>
>
> Buildfile: build.xml
>
> clover.setup:
>    [mkdir] Created dir: <
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
> [clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
> [clover-setup] Loaded from:
> /home/jenkins/tools/clover/latest/lib/clover.jar
>
> BUILD FAILED
> java.lang.RuntimeException: Clover upgrades for your license ended
> December 14 2010, and this version of Clover was built May 31 2011. Please
> visit http://www.atlassian.com/clover/renew for information on upgrading
> your license.
>        at
> com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
>        at
> com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
>        at
> com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
>        at
> org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
>        at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
>        at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>        at java.lang.reflect.Method.invoke(Method.java:597)
>        at
> org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
>        at org.apache.tools.ant.Task.perform(Task.java:348)
>        at org.apache.tools.ant.Target.execute(Target.java:357)
>        at org.apache.tools.ant.Target.performTasks(Target.java:385)
>        at
> org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
>        at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
>        at
> org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
>        at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
>        at org.apache.tools.ant.Main.runBuild(Main.java:758)
>        at org.apache.tools.ant.Main.startAnt(Main.java:217)
>        at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
>        at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)
>
> Total time: 1 second
> Build step 'Execute shell' marked build as failure
> [FINDBUGS] Skipping publisher since build result is FAILURE
> Recording test results
> Publishing Javadoc
> Archiving artifacts
> Recording fingerprints
>

Build failed in Jenkins: Pig-trunk #1217

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1217/changes>

Changes:

[daijy] Fix several e2e tests

[jcoveney] [trunk] PIG-2619: HBaseStorage constructs a Scan with cacheBlocks = false

------------------------------------------
[...truncated 6506 lines...]
 [findbugs]   org.mozilla.javascript.NativeJavaObject
 [findbugs]   jline.ConsoleReaderInputStream
 [findbugs]   org.apache.log4j.PropertyConfigurator
 [findbugs]   org.apache.hadoop.mapred.TaskID
 [findbugs]   org.apache.commons.cli.CommandLine
 [findbugs]   org.python.core.Py
 [findbugs]   org.apache.hadoop.io.BooleanWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable
 [findbugs]   org.antlr.runtime.BitSet
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.Job
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter$CompareOp
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader
 [findbugs]   org.mozilla.javascript.NativeFunction
 [findbugs]   org.apache.hadoop.mapreduce.Counter
 [findbugs]   org.codehaus.jackson.JsonEncoding
 [findbugs]   org.codehaus.jackson.JsonParseException
 [findbugs]   org.python.core.PyCode
 [findbugs]   com.jcraft.jsch.HostKey
 [findbugs]   org.apache.hadoop.hbase.filter.Filter
 [findbugs]   org.apache.commons.logging.Log
 [findbugs]   com.google.common.util.concurrent.ListenableFuture
 [findbugs]   org.apache.hadoop.util.RunJar
 [findbugs]   org.apache.hadoop.mapred.Counters$Group
 [findbugs]   com.jcraft.jsch.ChannelExec
 [findbugs]   org.apache.hadoop.hbase.util.Base64
 [findbugs]   org.antlr.runtime.TokenStream
 [findbugs]   org.apache.hadoop.io.IOUtils
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.JobControl
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 25
 [findbugs] Missing classes: 233
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 14 minutes 2 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1216

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1216/changes>

Changes:

[jcoveney] PIG-2540 piggybank trunk AvroStorage can't read schema on s3 in e/r mode

[jcoveney] [PIG-2618] e2e fails to build

------------------------------------------
[...truncated 53336 lines...]
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/27 10:36:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/27 10:36:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/27 10:36:42 INFO ipc.Server: Stopping server on 39951
    [junit] 12/03/27 10:36:42 INFO ipc.Server: IPC Server handler 0 on 39951: exiting
    [junit] 12/03/27 10:36:42 INFO ipc.Server: IPC Server handler 1 on 39951: exiting
    [junit] 12/03/27 10:36:42 INFO ipc.Server: IPC Server handler 2 on 39951: exiting
    [junit] 12/03/27 10:36:42 INFO ipc.Server: Stopping IPC Server listener on 39951
    [junit] 12/03/27 10:36:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/27 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/27 10:36:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:48070, storageID=DS-1560670394-67.195.138.20-48070-1332844056038, infoPort=38899, ipcPort=39951):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/27 10:36:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/27 10:36:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/27 10:36:43 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:52395 to delete  blk_5471282960740557369_1102 blk_15802032107254893_1102
    [junit] 12/03/27 10:36:43 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:50381 to delete  blk_-4893342285719632994_1095 blk_8200084384064892388_1101 blk_15802032107254893_1102
    [junit] 12/03/27 10:36:43 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120327102736432_0012
    [junit] 12/03/27 10:36:43 WARN mapred.TaskTracker: Unknown job job_20120327102736432_0012 being deleted.
    [junit] 12/03/27 10:36:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:43 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:48070, storageID=DS-1560670394-67.195.138.20-48070-1332844056038, infoPort=38899, ipcPort=39951):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/03/27 10:36:43 INFO ipc.Server: Stopping server on 39951
    [junit] 12/03/27 10:36:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:43 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/27 10:36:43 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/27 10:36:43 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1965725253
    [junit] Shutting down DataNode 1
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1965725253
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/27 10:36:43 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/27 10:36:43 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/27 10:36:44 INFO ipc.Server: Stopping server on 59830
    [junit] 12/03/27 10:36:44 INFO ipc.Server: IPC Server handler 0 on 59830: exiting
    [junit] 12/03/27 10:36:44 INFO ipc.Server: IPC Server handler 1 on 59830: exiting
    [junit] 12/03/27 10:36:44 INFO ipc.Server: IPC Server handler 2 on 59830: exiting
    [junit] 12/03/27 10:36:44 INFO ipc.Server: Stopping IPC Server listener on 59830
    [junit] 12/03/27 10:36:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:44 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/27 10:36:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/27 10:36:44 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46589, storageID=DS-1589661793-67.195.138.20-46589-1332844055694, infoPort=49266, ipcPort=59830):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/27 10:36:44 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/27 10:36:44 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46589, storageID=DS-1589661793-67.195.138.20-46589-1332844055694, infoPort=49266, ipcPort=59830):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/03/27 10:36:45 INFO ipc.Server: Stopping server on 59830
    [junit] 12/03/27 10:36:45 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:45 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/27 10:36:45 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/03/27 10:36:45 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1574283536
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1574283536
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/27 10:36:45 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/27 10:36:45 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Scheduling block blk_-4893342285719632994_1095 file build/test/data/dfs/data/data1/current/blk_-4893342285719632994 for deletion
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Scheduling block blk_15802032107254893_1102 file build/test/data/dfs/data/data1/current/blk_15802032107254893 for deletion
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Scheduling block blk_8200084384064892388_1101 file build/test/data/dfs/data/data2/current/blk_8200084384064892388 for deletion
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Deleted block blk_-4893342285719632994_1095 at file build/test/data/dfs/data/data1/current/blk_-4893342285719632994
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Deleted block blk_8200084384064892388_1101 at file build/test/data/dfs/data/data2/current/blk_8200084384064892388
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Deleted block blk_15802032107254893_1102 at file build/test/data/dfs/data/data1/current/blk_15802032107254893
    [junit] 12/03/27 10:36:45 INFO ipc.Server: Stopping server on 43609
    [junit] 12/03/27 10:36:45 INFO ipc.Server: IPC Server handler 0 on 43609: exiting
    [junit] 12/03/27 10:36:45 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:45 INFO ipc.Server: Stopping IPC Server listener on 43609
    [junit] 12/03/27 10:36:45 INFO ipc.Server: IPC Server handler 2 on 43609: exiting
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/27 10:36:45 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:50381, storageID=DS-19095612-67.195.138.20-50381-1332844055325, infoPort=49614, ipcPort=43609):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/27 10:36:45 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/27 10:36:45 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/27 10:36:45 INFO ipc.Server: IPC Server handler 1 on 43609: exiting
    [junit] 12/03/27 10:36:45 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/27 10:36:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:46 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:50381, storageID=DS-19095612-67.195.138.20-50381-1332844055325, infoPort=49614, ipcPort=43609):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/03/27 10:36:46 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/27 10:36:46 INFO ipc.Server: Stopping server on 43609
    [junit] 12/03/27 10:36:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/27 10:36:46 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/27 10:36:46 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/27 10:36:46 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId711748214
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId711748214
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/27 10:36:46 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/27 10:36:46 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/27 10:36:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:48070 to delete  blk_5471282960740557369_1102 blk_-4893342285719632994_1095 blk_8200084384064892388_1101
    [junit] 12/03/27 10:36:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:46589 to delete  blk_5471282960740557369_1102 blk_-4893342285719632994_1095 blk_8200084384064892388_1101 blk_15802032107254893_1102
    [junit] 12/03/27 10:36:46 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/03/27 10:36:46 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 16Number of transactions batched in Syncs: 156 Number of syncs: 348 SyncTimes(ms): 7132 630 
    [junit] 12/03/27 10:36:46 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/27 10:36:46 INFO ipc.Server: Stopping server on 54579
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 0 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 1 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 2 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 4 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 3 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 6 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 7 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 5 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 9 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: IPC Server handler 8 on 54579: exiting
    [junit] 12/03/27 10:36:46 INFO ipc.Server: Stopping IPC Server listener on 54579
    [junit] 12/03/27 10:36:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/27 10:36:46 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 544.399 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/03/27 10:36:47 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/03/27 10:36:47 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/27 10:36:47 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/03/27 10:36:47 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/03/27 10:36:47 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/27 10:36:47 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/03/27 10:36:47 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.107 sec
   [delete] Deleting directory /tmp/pig_junit_tmp382831045

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:781: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:836: Tests failed!

Total time: 24 minutes 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1215

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1215/changes>

Changes:

[daijy] Fix several e2e test failures

------------------------------------------
[...truncated 6487 lines...]
 [findbugs]   org.mozilla.javascript.NativeJavaObject
 [findbugs]   jline.ConsoleReaderInputStream
 [findbugs]   org.apache.log4j.PropertyConfigurator
 [findbugs]   org.apache.hadoop.mapred.TaskID
 [findbugs]   org.apache.commons.cli.CommandLine
 [findbugs]   org.python.core.Py
 [findbugs]   org.apache.hadoop.io.BooleanWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable
 [findbugs]   org.antlr.runtime.BitSet
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.Job
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter$CompareOp
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader
 [findbugs]   org.mozilla.javascript.NativeFunction
 [findbugs]   org.apache.hadoop.mapreduce.Counter
 [findbugs]   org.codehaus.jackson.JsonEncoding
 [findbugs]   org.codehaus.jackson.JsonParseException
 [findbugs]   org.python.core.PyCode
 [findbugs]   com.jcraft.jsch.HostKey
 [findbugs]   org.apache.hadoop.hbase.filter.Filter
 [findbugs]   org.apache.commons.logging.Log
 [findbugs]   com.google.common.util.concurrent.ListenableFuture
 [findbugs]   org.apache.hadoop.util.RunJar
 [findbugs]   org.apache.hadoop.mapred.Counters$Group
 [findbugs]   com.jcraft.jsch.ChannelExec
 [findbugs]   org.apache.hadoop.hbase.util.Base64
 [findbugs]   org.antlr.runtime.TokenStream
 [findbugs]   org.apache.hadoop.io.IOUtils
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.JobControl
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 25
 [findbugs] Missing classes: 233
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 9 minutes 43 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1214

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1214/changes>

Changes:

[daijy] PIG-2550: Custom tuple results in 'Unexpected datatype 110 while reading tuplefrom binary file' while spilling

------------------------------------------
[...truncated 52722 lines...]
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/24 10:36:20 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/03/24 10:36:20 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/24 10:36:20 INFO ipc.Server: Stopping server on 41818
    [junit] 12/03/24 10:36:20 INFO ipc.Server: IPC Server handler 0 on 41818: exiting
    [junit] 12/03/24 10:36:20 INFO ipc.Server: IPC Server handler 1 on 41818: exiting
    [junit] 12/03/24 10:36:20 INFO ipc.Server: IPC Server handler 2 on 41818: exiting
    [junit] 12/03/24 10:36:20 INFO ipc.Server: Stopping IPC Server listener on 41818
    [junit] 12/03/24 10:36:20 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:20 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/24 10:36:20 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/24 10:36:20 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:50786, storageID=DS-341775186-67.195.138.20-50786-1332584847527, infoPort=47189, ipcPort=41818):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/24 10:36:20 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/24 10:36:20 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/24 10:36:20 INFO datanode.DataNode: Scheduling block blk_5735442229728291317_1102 file build/test/data/dfs/data/data1/current/blk_5735442229728291317 for deletion
    [junit] 12/03/24 10:36:20 INFO datanode.DataNode: Deleted block blk_5735442229728291317_1102 at file build/test/data/dfs/data/data1/current/blk_5735442229728291317
    [junit] 12/03/24 10:36:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:21 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:50786, storageID=DS-341775186-67.195.138.20-50786-1332584847527, infoPort=47189, ipcPort=41818):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/03/24 10:36:21 INFO ipc.Server: Stopping server on 41818
    [junit] 12/03/24 10:36:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:21 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/24 10:36:21 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/24 10:36:21 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-644488429
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-644488429
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/24 10:36:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/03/24 10:36:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/24 10:36:21 INFO ipc.Server: Stopping server on 35517
    [junit] 12/03/24 10:36:21 INFO ipc.Server: IPC Server handler 0 on 35517: exiting
    [junit] 12/03/24 10:36:21 INFO ipc.Server: IPC Server handler 1 on 35517: exiting
    [junit] 12/03/24 10:36:21 INFO ipc.Server: IPC Server handler 2 on 35517: exiting
    [junit] 12/03/24 10:36:21 INFO ipc.Server: Stopping IPC Server listener on 35517
    [junit] 12/03/24 10:36:21 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/24 10:36:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/24 10:36:21 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34132, storageID=DS-1176409081-67.195.138.20-34132-1332584847144, infoPort=35348, ipcPort=35517):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/24 10:36:21 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/24 10:36:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/24 10:36:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:22 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34132, storageID=DS-1176409081-67.195.138.20-34132-1332584847144, infoPort=35348, ipcPort=35517):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/03/24 10:36:22 INFO ipc.Server: Stopping server on 35517
    [junit] 12/03/24 10:36:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:22 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/24 10:36:22 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/24 10:36:22 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1960655114
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1960655114
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/24 10:36:22 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/03/24 10:36:22 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/24 10:36:22 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:34132 to delete  blk_-3841136412746938090_1102
    [junit] 12/03/24 10:36:22 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:57111 to delete  blk_-3841136412746938090_1102 blk_-8249542005391452851_1101 blk_-5743994056258678942_1095
    [junit] 12/03/24 10:36:22 INFO ipc.Server: Stopping server on 55622
    [junit] 12/03/24 10:36:22 INFO ipc.Server: IPC Server handler 0 on 55622: exiting
    [junit] 12/03/24 10:36:22 INFO ipc.Server: IPC Server handler 1 on 55622: exiting
    [junit] 12/03/24 10:36:22 INFO ipc.Server: Stopping IPC Server listener on 55622
    [junit] 12/03/24 10:36:22 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/24 10:36:22 INFO ipc.Server: IPC Server handler 2 on 55622: exiting
    [junit] 12/03/24 10:36:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/24 10:36:22 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:57111, storageID=DS-316389568-67.195.138.20-57111-1332584846745, infoPort=56108, ipcPort=55622):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/24 10:36:22 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/24 10:36:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/24 10:36:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:23 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:57111, storageID=DS-316389568-67.195.138.20-57111-1332584846745, infoPort=56108, ipcPort=55622):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/03/24 10:36:23 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/24 10:36:23 INFO ipc.Server: Stopping server on 55622
    [junit] 12/03/24 10:36:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/24 10:36:23 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/24 10:36:23 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/24 10:36:23 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1638739691
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1638739691
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/24 10:36:23 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/24 10:36:23 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/24 10:36:23 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 12Number of transactions batched in Syncs: 161 Number of syncs: 348 SyncTimes(ms): 4314 403 
    [junit] 12/03/24 10:36:23 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/03/24 10:36:23 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/24 10:36:23 INFO ipc.Server: Stopping server on 43888
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 0 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/24 10:36:23 INFO ipc.Server: Stopping IPC Server listener on 43888
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 6 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 3 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 2 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 7 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 8 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 5 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 1 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 4 on 43888: exiting
    [junit] 12/03/24 10:36:23 INFO ipc.Server: IPC Server handler 9 on 43888: exiting
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 530.44 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/03/24 10:36:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/03/24 10:36:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/24 10:36:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/03/24 10:36:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/03/24 10:36:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/24 10:36:24 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/03/24 10:36:24 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.105 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1526002099

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:781: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:836: Tests failed!

Total time: 23 minutes 41 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1213

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1213/changes>

Changes:

[daijy] PIG-2442: Multiple Stores in pig streaming causes infinite waiting

[daijy] PIG-2442: Multiple Stores in pig streaming causes infinite waiting

[daijy] PIG-1270: Push limit into loader

[daijy] PIG-2589: missing TestBoolean.java

[daijy] PIG-2609: e2e harness: make hdfs base path configurable (outside default.conf)

------------------------------------------
[...truncated 52753 lines...]
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/23 10:36:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1666231610
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1666231610
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] Shutting down DataNode 2
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/23 10:36:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/23 10:36:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 49707
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 0 on 49707: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 2 on 49707: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 1 on 49707: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server listener on 49707
    [junit] 12/03/23 10:36:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:51845, storageID=DS-253180652-67.195.138.20-51845-1332498470010, infoPort=41614, ipcPort=49707):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/23 10:36:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:51845, storageID=DS-253180652-67.195.138.20-51845-1332498470010, infoPort=41614, ipcPort=49707):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 49707
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/23 10:36:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1448654454
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1448654454
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/23 10:36:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/23 10:36:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 56351
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 2 on 56351: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server listener on 56351
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 1 on 56351: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 0 on 56351: exiting
    [junit] 12/03/23 10:36:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:35690, storageID=DS-692503111-67.195.138.20-35690-1332498469661, infoPort=33131, ipcPort=56351):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/03/23 10:36:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:35690, storageID=DS-692503111-67.195.138.20-35690-1332498469661, infoPort=33131, ipcPort=56351):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 56351
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/23 10:36:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId723625249
    [junit] Shutting down DataNode 0
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId723625249
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/23 10:36:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/23 10:36:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 34202
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 0 on 34202: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server listener on 34202
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 1 on 34202: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 2 on 34202: exiting
    [junit] 12/03/23 10:36:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:39763, storageID=DS-810296157-67.195.138.20-39763-1332498469278, infoPort=46240, ipcPort=34202):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/23 10:36:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:39763, storageID=DS-810296157-67.195.138.20-39763-1332498469278, infoPort=46240, ipcPort=34202):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/03/23 10:36:42 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 34202
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/03/23 10:36:42 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/03/23 10:36:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId104293769
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId104293769
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/03/23 10:36:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/03/23 10:36:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/03/23 10:36:42 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/03/23 10:36:42 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/03/23 10:36:42 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 9Number of transactions batched in Syncs: 150 Number of syncs: 348 SyncTimes(ms): 6114 206 
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping server on 49636
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 0 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 1 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 2 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 3 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 4 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server listener on 49636
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 8 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 5 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 7 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 6 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: IPC Server handler 9 on 49636: exiting
    [junit] 12/03/23 10:36:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 526.817 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/03/23 10:36:43 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/03/23 10:36:43 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/23 10:36:43 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/03/23 10:36:43 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/03/23 10:36:43 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/03/23 10:36:43 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/03/23 10:36:43 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.117 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1523870312

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:781: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:836: Tests failed!

Total time: 23 minutes 58 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints