Posted to dev@pig.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2012/08/09 12:35:40 UTC

Build failed in Jenkins: Pig-trunk #1295

See <https://builds.apache.org/job/Pig-trunk/1295/changes>

Changes:

[jcoveney] [PIG-2862] Hardcode certain tuple lengths into the TUPLE BinInterSedes byte identifier (jcoveney)
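
For context on the PIG-2862 change above: the idea is to fold small tuple arities directly into the serialization type byte so the writer can skip a separate length field. The sketch below is only an illustrative reconstruction of that idea, not the actual BinInterSedes source; the marker values and helper name are assumptions made for the example.

    import java.io.DataOutput;
    import java.io.IOException;

    // Illustrative sketch of the PIG-2862 idea (not the real BinInterSedes code):
    // small tuple sizes get their own identifier byte, larger tuples fall back to
    // a generic TUPLE marker followed by an explicit size field.
    class TinyTupleEncoding {
        // Hypothetical marker values chosen for this example only.
        static final byte TUPLE = 110;    // generic marker: size follows
        static final byte TUPLE_0 = 111;  // markers 111..120 encode sizes 0..9 directly

        static void writeTupleHeader(DataOutput out, int size) throws IOException {
            if (size >= 0 && size <= 9) {
                // Size is hardcoded into the identifier byte: one byte total.
                out.writeByte(TUPLE_0 + size);
            } else {
                // Generic path: identifier byte plus an explicit size field.
                out.writeByte(TUPLE);
                out.writeInt(size);
            }
        }
    }

The saving is one int per small tuple on the wire, which matters because tuples dominate Pig's intermediate data.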

------------------------------------------
[...truncated 40322 lines...]
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] Shutting down DataNode 2
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/09 10:35:00 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/09 10:35:00 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/09 10:35:00 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:43155 to delete  blk_2869590480533377489_1073
    [junit] 12/08/09 10:35:00 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:57966 to delete  blk_-2884158153391299948_1078 blk_2869590480533377489_1073
    [junit] 12/08/09 10:35:00 INFO ipc.Server: Stopping server on 36501
    [junit] 12/08/09 10:35:00 INFO ipc.Server: IPC Server handler 1 on 36501: exiting
    [junit] 12/08/09 10:35:00 INFO ipc.Server: IPC Server handler 0 on 36501: exiting
    [junit] 12/08/09 10:35:00 INFO ipc.Server: IPC Server handler 2 on 36501: exiting
    [junit] 12/08/09 10:35:00 INFO ipc.Server: Stopping IPC Server listener on 36501
    [junit] 12/08/09 10:35:00 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:00 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/09 10:35:00 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/09 10:35:00 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:57966, storageID=DS-1745960504-67.195.138.20-57966-1344507983180, infoPort=57805, ipcPort=36501):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/09 10:35:00 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/09 10:35:00 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:57966, storageID=DS-1745960504-67.195.138.20-57966-1344507983180, infoPort=57805, ipcPort=36501):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/09 10:35:01 INFO ipc.Server: Stopping server on 36501
    [junit] 12/08/09 10:35:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/09 10:35:01 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/09 10:35:01 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/08/09 10:35:01 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-831411591
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-831411591
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/09 10:35:01 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/09 10:35:01 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/09 10:35:01 INFO ipc.Server: Stopping server on 53188
    [junit] 12/08/09 10:35:01 INFO ipc.Server: IPC Server handler 1 on 53188: exiting
    [junit] 12/08/09 10:35:01 INFO ipc.Server: IPC Server handler 2 on 53188: exiting
    [junit] 12/08/09 10:35:01 INFO ipc.Server: IPC Server handler 0 on 53188: exiting
    [junit] 12/08/09 10:35:01 INFO ipc.Server: Stopping IPC Server listener on 53188
    [junit] 12/08/09 10:35:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:01 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/09 10:35:01 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:50457, storageID=DS-1010452691-67.195.138.20-50457-1344507982798, infoPort=40226, ipcPort=53188):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/09 10:35:01 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120809102623594_0012
    [junit] 12/08/09 10:35:01 WARN mapred.TaskTracker: Unknown job job_20120809102623594_0012 being deleted.
    [junit] 12/08/09 10:35:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:50457, storageID=DS-1010452691-67.195.138.20-50457-1344507982798, infoPort=40226, ipcPort=53188):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/09 10:35:01 INFO ipc.Server: Stopping server on 53188
    [junit] 12/08/09 10:35:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/09 10:35:01 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/09 10:35:01 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/09 10:35:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/09 10:35:02 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId990540267
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId990540267
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/09 10:35:02 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/09 10:35:02 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping server on 33901
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 2 on 33901: exiting
    [junit] 12/08/09 10:35:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping IPC Server listener on 33901
    [junit] 12/08/09 10:35:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 1 on 33901: exiting
    [junit] 12/08/09 10:35:02 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:40861, storageID=DS-1774000341-67.195.138.20-40861-1344507982390, infoPort=55123, ipcPort=33901):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/09 10:35:02 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 0 on 33901: exiting
    [junit] 12/08/09 10:35:02 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/09 10:35:02 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:40861, storageID=DS-1774000341-67.195.138.20-40861-1344507982390, infoPort=55123, ipcPort=33901):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/09 10:35:02 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping server on 33901
    [junit] 12/08/09 10:35:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/09 10:35:02 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/09 10:35:02 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/09 10:35:02 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1781304042
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1781304042
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/09 10:35:02 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/09 10:35:02 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/09 10:35:02 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/09 10:35:02 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/09 10:35:02 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 9Number of transactions batched in Syncs: 147 Number of syncs: 288 SyncTimes(ms): 3166 159 
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping server on 32783
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 0 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 1 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 2 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 3 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 4 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 5 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 6 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 8 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 7 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/09 10:35:02 INFO ipc.Server: IPC Server handler 9 on 32783: exiting
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping IPC Server listener on 32783
    [junit] 12/08/09 10:35:02 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 513.542 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/09 10:35:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/09 10:35:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/09 10:35:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/09 10:35:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/09 10:35:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/09 10:35:03 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/09 10:35:03 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.102 sec
   [delete] Deleting directory /tmp/pig_junit_tmp221050452

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:851: Tests failed!

Total time: 22 minutes 46 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Jenkins build is back to normal : Pig-trunk #1341

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1341/changes>


Build failed in Jenkins: Pig-trunk #1340

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1340/changes>

Changes:

[jcoveney] PIG-2931: $ signs in the replacement string make parameter substitution fail (cheolsoo via jcoveney)

[jcoveney] Properly commit PIG-1283

[jcoveney] Properly commit PIG-2778

[jcoveney] PIG-1283: COUNT on null bag causes failure (analog.sony via jcoveney)

[jcoveney] PIG-2778: Add matches operator to predicate pushdown (cheolsoo via jcoveney)

[jcoveney] PIG-1283: COUNT on null bag causes failure (anand via jcoveney)
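
On PIG-1283 above (COUNT on a null bag causing a failure): the failure mode is a null DataBag reaching the aggregate's exec() and escaping as an exception. The sketch below is a hedged illustration of the defensive pattern, not the committed org.apache.pig.builtin.COUNT patch; treating a missing or null bag as zero rows is an assumption made for the example.

    import java.io.IOException;

    import org.apache.pig.EvalFunc;
    import org.apache.pig.data.DataBag;
    import org.apache.pig.data.Tuple;

    // Illustrative null-safe COUNT-style UDF (not the actual builtin.COUNT fix).
    public class NullSafeCount extends EvalFunc<Long> {
        @Override
        public Long exec(Tuple input) throws IOException {
            if (input == null || input.size() == 0 || input.get(0) == null) {
                // Assumption for this sketch: count a missing/null bag as 0
                // instead of letting a NullPointerException escape.
                return 0L;
            }
            DataBag bag = (DataBag) input.get(0);
            return bag.size();
        }
    }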

------------------------------------------
[...truncated 35815 lines...]
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/19 10:33:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/19 10:33:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 2
    [junit] 12/10/19 10:33:09 INFO ipc.Server: Stopping server on 50154
    [junit] 12/10/19 10:33:09 INFO ipc.Server: Stopping IPC Server listener on 50154
    [junit] 12/10/19 10:33:09 INFO ipc.Server: IPC Server handler 0 on 50154: exiting
    [junit] 12/10/19 10:33:09 INFO ipc.Server: IPC Server handler 1 on 50154: exiting
    [junit] 12/10/19 10:33:09 INFO ipc.Server: IPC Server handler 2 on 50154: exiting
    [junit] 12/10/19 10:33:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/19 10:33:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/19 10:33:09 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:33859, storageID=DS-265326286-67.195.138.20-33859-1350642304706, infoPort=59799, ipcPort=50154):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/19 10:33:09 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/19 10:33:10 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121019102505130_0012
    [junit] 12/10/19 10:33:10 WARN mapred.TaskTracker: Unknown job job_20121019102505130_0012 being deleted.
    [junit] 12/10/19 10:33:10 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:33859, storageID=DS-265326286-67.195.138.20-33859-1350642304706, infoPort=59799, ipcPort=50154):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/19 10:33:10 INFO ipc.Server: Stopping server on 50154
    [junit] 12/10/19 10:33:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/19 10:33:10 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/19 10:33:10 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/19 10:33:10 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-2114208749
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-2114208749
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/19 10:33:10 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/19 10:33:10 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 1
    [junit] 12/10/19 10:33:10 INFO ipc.Server: Stopping server on 53622
    [junit] 12/10/19 10:33:10 INFO ipc.Server: IPC Server handler 0 on 53622: exiting
    [junit] 12/10/19 10:33:10 INFO ipc.Server: IPC Server handler 2 on 53622: exiting
    [junit] 12/10/19 10:33:10 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/19 10:33:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:10 INFO ipc.Server: IPC Server handler 1 on 53622: exiting
    [junit] 12/10/19 10:33:10 INFO ipc.Server: Stopping IPC Server listener on 53622
    [junit] 12/10/19 10:33:10 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:59054, storageID=DS-1357631513-67.195.138.20-59054-1350642304318, infoPort=42893, ipcPort=53622):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] Shutting down DataNode 0
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/19 10:33:10 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:59054, storageID=DS-1357631513-67.195.138.20-59054-1350642304318, infoPort=42893, ipcPort=53622):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/19 10:33:10 INFO ipc.Server: Stopping server on 53622
    [junit] 12/10/19 10:33:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/19 10:33:10 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/19 10:33:10 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/19 10:33:10 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1988836881
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1988836881
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/19 10:33:10 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/19 10:33:10 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/19 10:33:11 INFO ipc.Server: Stopping server on 44272
    [junit] 12/10/19 10:33:11 INFO ipc.Server: IPC Server handler 2 on 44272: exiting
    [junit] 12/10/19 10:33:11 INFO ipc.Server: IPC Server handler 0 on 44272: exiting
    [junit] 12/10/19 10:33:11 INFO ipc.Server: Stopping IPC Server listener on 44272
    [junit] 12/10/19 10:33:11 INFO ipc.Server: IPC Server handler 1 on 44272: exiting
    [junit] 12/10/19 10:33:11 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:11 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/19 10:33:11 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/19 10:33:11 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:54004, storageID=DS-1200933640-67.195.138.20-54004-1350642303917, infoPort=49479, ipcPort=44272):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/19 10:33:11 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/19 10:33:11 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:59054 to delete  blk_315158172127505469_1134 blk_4093151092393475305_1133 blk_4066341243358867538_1127
    [junit] 12/10/19 10:33:11 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:54004 to delete  blk_-8562916814900924673_1134 blk_4066341243358867538_1127
    [junit] 12/10/19 10:33:12 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/19 10:33:12 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/19 10:33:12 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:54004, storageID=DS-1200933640-67.195.138.20-54004-1350642303917, infoPort=49479, ipcPort=44272):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/19 10:33:12 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/19 10:33:12 INFO ipc.Server: Stopping server on 44272
    [junit] 12/10/19 10:33:12 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:12 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/19 10:33:12 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/19 10:33:12 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/19 10:33:12 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-709792713
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-709792713
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/19 10:33:12 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/19 10:33:12 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/19 10:33:12 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/19 10:33:12 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/19 10:33:12 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 14Number of transactions batched in Syncs: 124 Number of syncs: 431 SyncTimes(ms): 5858 307 
    [junit] 12/10/19 10:33:12 INFO ipc.Server: Stopping server on 36926
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 0 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 1 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 2 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 3 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 5 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 7 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 9 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 8 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: Stopping IPC Server listener on 36926
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 4 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO ipc.Server: IPC Server handler 6 on 36926: exiting
    [junit] 12/10/19 10:33:12 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/19 10:33:12 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 481.568 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/19 10:33:13 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/19 10:33:13 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/19 10:33:13 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/19 10:33:13 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/19 10:33:13 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/19 10:33:13 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/19 10:33:13 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.229 sec
   [delete] Deleting directory /tmp/pig_junit_tmp650391535

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:815: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:884: Tests failed!

Total time: 20 minutes 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1339

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1339/changes>

Changes:

[jcoveney] Fix CHANGES.txt (jcoveney)

[jcoveney] Fix CHANGES.txt (jcoveney)

[jcoveney] PIG-2958: Pig tests do not appear to have a logger attached (daijyc via jcoveney)

[jcoveney] PIG-2972: TestPoissonSampleLoader failing on rhel environment (jcoveney)

[gdfm] Fixed problems with CHANGES.txt

[gdfm] PIG-2985: TestRank1,2,3 fail with hadoop-2.0.x (rohini via azaroth)
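
On PIG-2958 above, and the repeated "No logger object provided to UDF" warnings in the TestStringUDFs output earlier in this thread: those messages appear to be EvalFunc.warn() falling back to plain logging when the test harness has not attached a PigLogger to the UDF. The sketch below only illustrates that warn() call path from a UDF's point of view; the UDF name and the particular warning enum are made up for the example.

    import java.io.IOException;

    import org.apache.pig.EvalFunc;
    import org.apache.pig.PigWarning;
    import org.apache.pig.data.Tuple;

    // Hypothetical UDF showing where the "No logger object provided to UDF"
    // warning can originate: warn() routes through the attached PigLogger when
    // one exists, and otherwise falls back to ordinary logging, which is what
    // the junit output above shows.
    public class ExampleWarningUdf extends EvalFunc<String> {
        @Override
        public String exec(Tuple input) throws IOException {
            if (input == null || input.size() == 0 || input.get(0) == null) {
                // With no PigLogger attached, this surfaces in the test log as
                // "No logger object provided to UDF: ...".
                warn("null input to ExampleWarningUdf", PigWarning.UDF_WARNING_1);
                return null;
            }
            return input.get(0).toString();
        }
    }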

------------------------------------------
[...truncated 6629 lines...]
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.codehaus.jackson.annotate.JsonPropertyOrder
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.collections.IteratorUtils
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 51
 [findbugs] Missing classes: 286
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 27 seconds
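The FindBugs exit-code lines above are simple bit arithmetic: the 'bugs found' flag contributes 1 (warnings were generated) and the 'missing class' flag contributes 2 (classes needed for analysis were missing), and the two OR together to the reported exit code 3. A minimal Java sketch of that calculation follows; the class and method names are made up for illustration and this is not FindBugs or Pig source.

    // Illustrative sketch of the exit-code arithmetic reported in the log above.
    // The flag values (1 and 2) are taken from the log; everything else is hypothetical.
    public class FindbugsExitCodeSketch {
        static final int BUGS_FOUND_FLAG = 1;    // "Setting 'bugs found' flag (1)"
        static final int MISSING_CLASS_FLAG = 2; // "Setting 'missing class' flag (2)"

        static int exitCode(int warnings, int missingClasses) {
            int code = 0;
            if (warnings > 0) {
                code |= BUGS_FOUND_FLAG;
            }
            if (missingClasses > 0) {
                code |= MISSING_CLASS_FLAG;
            }
            return code;
        }

        public static void main(String[] args) {
            // "Warnings generated: 51" and "Missing classes: 286" => exit code 3
            System.out.println("Exit code set to: " + exitCode(51, 286));
        }
    }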


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1338

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1338/changes>

Changes:

[billgraham] moving PIG-2976 entry under Pig 0.11 section

[billgraham] PIG-2976: Reduce HBaseStorage logging (billgraham)

[gdfm] PIG-2947: Documentation for Rank operator (xalan via azaroth)

[dvryaboy] PIG-2943: DevTests, Refactor Windows checks to use new Util.WINDOWS method for code health

------------------------------------------
[...truncated 37582 lines...]
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/17 10:33:25 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/17 10:33:25 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 2
    [junit] 12/10/17 10:33:25 INFO ipc.Server: Stopping server on 34587
    [junit] 12/10/17 10:33:25 INFO ipc.Server: IPC Server handler 2 on 34587: exiting
    [junit] 12/10/17 10:33:25 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:25 INFO ipc.Server: IPC Server handler 1 on 34587: exiting
    [junit] 12/10/17 10:33:25 INFO ipc.Server: Stopping IPC Server listener on 34587
    [junit] 12/10/17 10:33:25 INFO ipc.Server: IPC Server handler 0 on 34587: exiting
    [junit] 12/10/17 10:33:25 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/17 10:33:25 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46663, storageID=DS-369403952-67.195.138.20-46663-1350469513420, infoPort=44913, ipcPort=34587):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/17 10:33:25 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/17 10:33:25 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/17 10:33:26 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:54238 to delete  blk_-7980504267615956416_1134 blk_-854542032623531907_1134 blk_-1050888473204143267_1127 blk_-9208145603105516998_1133
    [junit] 12/10/17 10:33:26 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:45243 to delete  blk_-7980504267615956416_1134 blk_-854542032623531907_1134 blk_-1050888473204143267_1127 blk_-9208145603105516998_1133
    [junit] 12/10/17 10:33:26 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/17 10:33:26 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:26 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46663, storageID=DS-369403952-67.195.138.20-46663-1350469513420, infoPort=44913, ipcPort=34587):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/17 10:33:26 INFO ipc.Server: Stopping server on 34587
    [junit] 12/10/17 10:33:26 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:26 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:26 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/17 10:33:26 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/17 10:33:26 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-255695494
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-255695494
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/17 10:33:26 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/10/17 10:33:26 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/17 10:33:26 INFO ipc.Server: Stopping server on 40013
    [junit] 12/10/17 10:33:26 INFO ipc.Server: IPC Server handler 0 on 40013: exiting
    [junit] 12/10/17 10:33:26 INFO ipc.Server: IPC Server handler 1 on 40013: exiting
    [junit] 12/10/17 10:33:26 INFO ipc.Server: IPC Server handler 2 on 40013: exiting
    [junit] 12/10/17 10:33:26 INFO ipc.Server: Stopping IPC Server listener on 40013
    [junit] 12/10/17 10:33:26 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:26 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/17 10:33:26 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/17 10:33:26 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:54238, storageID=DS-2078851495-67.195.138.20-54238-1350469513040, infoPort=35583, ipcPort=40013):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/17 10:33:26 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/17 10:33:27 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Scheduling block blk_-9208145603105516998_1133 file build/test/data/dfs/data/data1/current/blk_-9208145603105516998 for deletion
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Scheduling block blk_-7980504267615956416_1134 file build/test/data/dfs/data/data2/current/blk_-7980504267615956416 for deletion
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Deleted block blk_-9208145603105516998_1133 at file build/test/data/dfs/data/data1/current/blk_-9208145603105516998
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Deleted block blk_-7980504267615956416_1134 at file build/test/data/dfs/data/data2/current/blk_-7980504267615956416
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Scheduling block blk_-1050888473204143267_1127 file build/test/data/dfs/data/data2/current/blk_-1050888473204143267 for deletion
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Deleted block blk_-1050888473204143267_1127 at file build/test/data/dfs/data/data2/current/blk_-1050888473204143267
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Scheduling block blk_-854542032623531907_1134 file build/test/data/dfs/data/data1/current/blk_-854542032623531907 for deletion
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Deleted block blk_-854542032623531907_1134 at file build/test/data/dfs/data/data1/current/blk_-854542032623531907
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:54238, storageID=DS-2078851495-67.195.138.20-54238-1350469513040, infoPort=35583, ipcPort=40013):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/17 10:33:27 INFO ipc.Server: Stopping server on 40013
    [junit] 12/10/17 10:33:27 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:27 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:27 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/17 10:33:27 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/17 10:33:27 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId853001351
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId853001351
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/17 10:33:27 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/10/17 10:33:27 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/17 10:33:28 INFO ipc.Server: Stopping server on 36080
    [junit] 12/10/17 10:33:28 INFO ipc.Server: IPC Server handler 0 on 36080: exiting
    [junit] 12/10/17 10:33:28 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:28 INFO ipc.Server: IPC Server handler 2 on 36080: exiting
    [junit] 12/10/17 10:33:28 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/17 10:33:28 INFO ipc.Server: Stopping IPC Server listener on 36080
    [junit] 12/10/17 10:33:28 INFO ipc.Server: IPC Server handler 1 on 36080: exiting
    [junit] 12/10/17 10:33:28 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:45243, storageID=DS-285321101-67.195.138.20-45243-1350469512416, infoPort=48278, ipcPort=36080):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/17 10:33:28 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/17 10:33:28 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/17 10:33:28 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/17 10:33:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:29 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:45243, storageID=DS-285321101-67.195.138.20-45243-1350469512416, infoPort=48278, ipcPort=36080):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/17 10:33:29 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/17 10:33:29 INFO ipc.Server: Stopping server on 36080
    [junit] 12/10/17 10:33:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/17 10:33:29 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/17 10:33:29 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/17 10:33:29 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-562303601
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-562303601
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/17 10:33:29 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/17 10:33:29 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/17 10:33:29 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/17 10:33:29 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/17 10:33:29 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 10 Number of transactions batched in Syncs: 133 Number of syncs: 431 SyncTimes(ms): 7247 312 
    [junit] 12/10/17 10:33:29 INFO ipc.Server: Stopping server on 53153
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 0 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 2 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 3 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 1 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 4 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 9 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 6 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 5 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 7 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO ipc.Server: Stopping IPC Server listener on 53153
    [junit] 12/10/17 10:33:29 INFO ipc.Server: IPC Server handler 8 on 53153: exiting
    [junit] 12/10/17 10:33:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/17 10:33:29 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 489.742 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
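The javax.management.InstanceNotFoundException warnings during the TestStore teardown above appear to come from unregistering MBeans that are not (or are no longer) registered: the JMX MBeanServer throws that exception in this situation, and Hadoop's MBeans.unregister logs it as a WARN instead of failing. A minimal sketch of the underlying JMX behavior, with a hypothetical ObjectName modeled on the log (not Hadoop or Pig code):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    // Unregistering an ObjectName that is not currently registered throws
    // InstanceNotFoundException, which is what the WARN traces above show.
    public class UnregisterMissingMBean {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Hypothetical name, modeled on the ones in the log.
            ObjectName name = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
            try {
                server.unregisterMBean(name); // nothing is registered under this name here
            } catch (javax.management.InstanceNotFoundException e) {
                System.out.println("WARN " + name + ": " + e);
            }
        }
    }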
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/17 10:33:30 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/17 10:33:30 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/17 10:33:30 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/17 10:33:30 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/17 10:33:30 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/17 10:33:30 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/17 10:33:30 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.229 sec
   [delete] Deleting directory /tmp/pig_junit_tmp503174691
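The builtin.SUBSTRING warnings above reflect plain JDK behavior: when the computed indices are out of range (for example an end index smaller than the begin index), String.substring throws StringIndexOutOfBoundsException, which the UDF catches and logs as a WARN. A small stand-alone sketch of that behavior (not the Pig builtin itself; the exact exception message varies by JDK, and "String index out of range: -2" is what the JDK 6 runtime used here produces):

    // Demonstrates the exception type behind the SUBSTRING warnings in the log above.
    public class SubstringRangeDemo {
        public static void main(String[] args) {
            String s = "pig";
            try {
                s.substring(1, -1); // end index < begin index => out of range
            } catch (StringIndexOutOfBoundsException e) {
                System.out.println("WARN builtin.SUBSTRING: " + e);
            }
        }
    }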

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:815: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:884: Tests failed!

Total time: 20 minutes 45 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1337

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1337/changes>

Changes:

[sms] PIG-2966: Test failures on CentOS 6 because MALLOC_ARENA_MAX is not set (cheolsoo via sms)

[gates] PIG-2794 Pig test: add utils to simplify testing on Windows

------------------------------------------
[...truncated 6629 lines...]
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.codehaus.jackson.annotate.JsonPropertyOrder
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.collections.IteratorUtils
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 51
 [findbugs] Missing classes: 286
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 31 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1336

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1336/changes>

Changes:

[dvryaboy] Add test/org/apache/pig/test/junit for PIG-2908

[dvryaboy] PIG-2928: Fix e2e test failures in trunk: FilterBoolean_23/24

[dvryaboy] PIG-2908: Fix unit tests to work with jdk7

[jcoveney] PIG-2971: Add new parameter to specify the streaming environment (jcoveney)

[jcoveney] PIG-2963: Illustrate command and POPackageLite (cheolsoo via jcoveney)

------------------------------------------
[...truncated 6962 lines...]
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.codehaus.jackson.annotate.JsonPropertyOrder
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.collections.IteratorUtils
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 51
 [findbugs] Missing classes: 286
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 6 minutes 19 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1335

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1335/changes>

Changes:

[sms] PIG-2579: Support for multiple input schemas in AvroStorage (cheolsoo via sms). Add two files that were missed out.

[thejas] PIG-2910: Add function to read schema from output of Schema.toString() (initialcontext via thejas)
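
For context on the PIG-2910 entry above: the change is about parsing the string that Schema.toString() prints back into a Schema object. Below is a minimal, illustrative Java sketch only; it assumes Pig's existing Utils.getSchemaFromString helper is on the classpath, and the exact method the PIG-2910 patch adds is not shown in this log, so that call is left as a hypothetical comment.

    import org.apache.pig.impl.logicalLayer.schema.Schema;
    import org.apache.pig.impl.util.Utils;

    public class SchemaRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // Build a Schema from a plain field-list string (existing Pig utility).
            Schema schema = Utils.getSchemaFromString("name:chararray, age:int");

            // Schema.toString() prints a braced form, e.g. "{name: chararray,age: int}".
            String printed = schema.toString();
            System.out.println(printed);

            // PIG-2910 adds a function that parses this printed form back into a Schema.
            // The method name below is hypothetical and only marks where such a call would go:
            // Schema parsed = Utils.getSchemaFromToStringOutput(printed);
        }
    }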

------------------------------------------
[...truncated 36390 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/13 10:32:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/13 10:32:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 2
    [junit] 12/10/13 10:32:21 INFO ipc.Server: Stopping server on 40037
    [junit] 12/10/13 10:32:21 INFO ipc.Server: IPC Server handler 2 on 40037: exiting
    [junit] 12/10/13 10:32:21 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/13 10:32:21 INFO ipc.Server: IPC Server handler 0 on 40037: exiting
    [junit] 12/10/13 10:32:21 INFO ipc.Server: Stopping IPC Server listener on 40037
    [junit] 12/10/13 10:32:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/13 10:32:21 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:55569, storageID=DS-826530128-67.195.138.20-55569-1350123848509, infoPort=45307, ipcPort=40037):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/13 10:32:21 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/13 10:32:21 INFO ipc.Server: IPC Server handler 1 on 40037: exiting
    [junit] 12/10/13 10:32:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/13 10:32:22 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121013102408923_0012
    [junit] 12/10/13 10:32:22 WARN mapred.TaskTracker: Unknown job job_20121013102408923_0012 being deleted.
    [junit] 12/10/13 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/13 10:32:22 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:55569, storageID=DS-826530128-67.195.138.20-55569-1350123848509, infoPort=45307, ipcPort=40037):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/13 10:32:22 INFO ipc.Server: Stopping server on 40037
    [junit] 12/10/13 10:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/13 10:32:22 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/13 10:32:22 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/13 10:32:22 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-780551647
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-780551647
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/13 10:32:22 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/13 10:32:22 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/13 10:32:22 INFO ipc.Server: Stopping server on 52128
    [junit] 12/10/13 10:32:22 INFO ipc.Server: IPC Server handler 0 on 52128: exiting
    [junit] 12/10/13 10:32:22 INFO ipc.Server: IPC Server handler 1 on 52128: exiting
    [junit] 12/10/13 10:32:22 INFO ipc.Server: Stopping IPC Server listener on 52128
    [junit] 12/10/13 10:32:22 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/13 10:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:22 INFO ipc.Server: IPC Server handler 2 on 52128: exiting
    [junit] 12/10/13 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/13 10:32:22 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:51654, storageID=DS-1735019734-67.195.138.20-51654-1350123848128, infoPort=48124, ipcPort=52128):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/13 10:32:22 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/13 10:32:23 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:51654, storageID=DS-1735019734-67.195.138.20-51654-1350123848128, infoPort=48124, ipcPort=52128):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/13 10:32:23 INFO ipc.Server: Stopping server on 52128
    [junit] 12/10/13 10:32:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/13 10:32:23 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/13 10:32:23 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/13 10:32:23 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/13 10:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/13 10:32:23 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1319661914
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1319661914
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] Shutting down DataNode 0
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/13 10:32:23 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/13 10:32:23 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping server on 49363
    [junit] 12/10/13 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 1 on 49363: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 49363
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 2 on 49363: exiting
    [junit] 12/10/13 10:32:24 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46055, storageID=DS-1321361236-67.195.138.20-46055-1350123847677, infoPort=39104, ipcPort=49363):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/13 10:32:24 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/13 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 0 on 49363: exiting
    [junit] 12/10/13 10:32:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/13 10:32:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46055, storageID=DS-1321361236-67.195.138.20-46055-1350123847677, infoPort=39104, ipcPort=49363):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/13 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping server on 49363
    [junit] 12/10/13 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/13 10:32:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/13 10:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/13 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-244228332
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-244228332
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/13 10:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/13 10:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/13 10:32:24 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/13 10:32:24 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 12Number of transactions batched in Syncs: 127 Number of syncs: 431 SyncTimes(ms): 9099 281 
    [junit] 12/10/13 10:32:24 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping server on 52159
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 0 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 1 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 4 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 3 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 2 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 5 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 7 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 6 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 8 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: IPC Server handler 9 on 52159: exiting
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 52159
    [junit] 12/10/13 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/13 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 489.733 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/13 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/13 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/13 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/13 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/13 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/13 10:32:25 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/13 10:32:25 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.247 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1028832257

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 16 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1334

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1334/changes>

Changes:

[dvryaboy] Preparing for 0.12.0 development

[dvryaboy] Preparing for release 0.11.0

[jcoveney] PIG-2965: RANDOM should allow seed initialization for ease of testing (jcoveney)
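
The PIG-2965 entry above carries a testing rationale: a seedable RANDOM makes results reproducible. The following is a generic Java sketch of that idea using java.util.Random only; it is not the Pig UDF itself, whose API is not shown in this log.

    import java.util.Random;

    public class SeededRandomSketch {
        public static void main(String[] args) {
            // Two generators created with the same seed produce the same sequence,
            // which is what makes seed initialization useful in unit tests.
            Random a = new Random(42L);
            Random b = new Random(42L);
            for (int i = 0; i < 3; i++) {
                double x = a.nextDouble();
                double y = b.nextDouble();
                assert x == y;  // identical draws; run with -ea to enable the check
                System.out.println(x + " == " + y);
            }
        }
    }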

------------------------------------------
[...truncated 6948 lines...]
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.collections.IteratorUtils
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 51
 [findbugs] Missing classes: 285
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 6 minutes 20 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1333

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1333/changes>

Changes:

[billgraham] PIG-2964: Add helper method getJobList() to PigStats.JobGraph. Extend visibility of a couple of methods on the same class (prkommireddi via billgraham)

[sms] PIG-2579: Support for multiple input schemas in AvroStorage (cheolsoo via sms)

------------------------------------------
[...truncated 35907 lines...]
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/12 10:36:07 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/12 10:36:07 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/12 10:36:07 INFO ipc.Server: Stopping server on 60319
    [junit] 12/10/12 10:36:07 INFO ipc.Server: IPC Server handler 1 on 60319: exiting
    [junit] 12/10/12 10:36:07 INFO ipc.Server: IPC Server handler 2 on 60319: exiting
    [junit] 12/10/12 10:36:07 INFO ipc.Server: IPC Server handler 0 on 60319: exiting
    [junit] 12/10/12 10:36:07 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/12 10:36:07 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:07 INFO ipc.Server: Stopping IPC Server listener on 60319
    [junit] 12/10/12 10:36:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/12 10:36:07 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:58890, storageID=DS-1072995515-67.195.138.20-58890-1350037664961, infoPort=60166, ipcPort=60319):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/12 10:36:07 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/12 10:36:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:58890, storageID=DS-1072995515-67.195.138.20-58890-1350037664961, infoPort=60166, ipcPort=60319):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/12 10:36:08 INFO ipc.Server: Stopping server on 60319
    [junit] 12/10/12 10:36:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:08 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/12 10:36:08 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/12 10:36:08 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId580194863
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId580194863
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] Shutting down DataNode 1
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/12 10:36:08 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/12 10:36:08 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/12 10:36:08 INFO ipc.Server: Stopping server on 50323
    [junit] 12/10/12 10:36:08 INFO ipc.Server: IPC Server handler 1 on 50323: exiting
    [junit] 12/10/12 10:36:08 INFO ipc.Server: IPC Server handler 0 on 50323: exiting
    [junit] 12/10/12 10:36:08 INFO ipc.Server: Stopping IPC Server listener on 50323
    [junit] 12/10/12 10:36:08 INFO ipc.Server: IPC Server handler 2 on 50323: exiting
    [junit] 12/10/12 10:36:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:08 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/12 10:36:08 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:44491, storageID=DS-427777727-67.195.138.20-44491-1350037664579, infoPort=50430, ipcPort=50323):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:44491, storageID=DS-427777727-67.195.138.20-44491-1350037664579, infoPort=50430, ipcPort=50323):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/12 10:36:08 INFO ipc.Server: Stopping server on 50323
    [junit] 12/10/12 10:36:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:08 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/12 10:36:08 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/12 10:36:08 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1999968105
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1999968105
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] Shutting down DataNode 0
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/12 10:36:08 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/12 10:36:08 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/12 10:36:09 INFO ipc.Server: Stopping server on 60482
    [junit] 12/10/12 10:36:09 INFO ipc.Server: IPC Server handler 0 on 60482: exiting
    [junit] 12/10/12 10:36:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:09 INFO ipc.Server: Stopping IPC Server listener on 60482
    [junit] 12/10/12 10:36:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/12 10:36:09 INFO ipc.Server: IPC Server handler 1 on 60482: exiting
    [junit] 12/10/12 10:36:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/12 10:36:09 INFO ipc.Server: IPC Server handler 2 on 60482: exiting
    [junit] 12/10/12 10:36:09 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:44427, storageID=DS-1328646207-67.195.138.20-44427-1350037664164, infoPort=50534, ipcPort=60482):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/12 10:36:09 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/12 10:36:09 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/12 10:36:09 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121012102745389_0012
    [junit] 12/10/12 10:36:09 WARN mapred.TaskTracker: Unknown job job_20121012102745389_0012 being deleted.
    [junit] 12/10/12 10:36:09 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121012102745389_0012
    [junit] 12/10/12 10:36:09 WARN mapred.TaskTracker: Unknown job job_20121012102745389_0012 being deleted.
    [junit] 12/10/12 10:36:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:10 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:44427, storageID=DS-1328646207-67.195.138.20-44427-1350037664164, infoPort=50534, ipcPort=60482):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/12 10:36:10 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/12 10:36:10 INFO ipc.Server: Stopping server on 60482
    [junit] 12/10/12 10:36:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/12 10:36:10 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/12 10:36:10 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/12 10:36:10 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId2007973396
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId2007973396
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/12 10:36:10 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/12 10:36:10 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/12 10:36:10 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:44491 to delete  blk_-3589319600245743387_1127 blk_7017847405562164561_1133 blk_1303708581521468644_1134 blk_1283184091728964954_1134
    [junit] 12/10/12 10:36:10 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:46475 to delete  blk_1303708581521468644_1134
    [junit] 12/10/12 10:36:10 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/12 10:36:10 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/12 10:36:10 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 6Number of transactions batched in Syncs: 134 Number of syncs: 431 SyncTimes(ms): 7197 271 
    [junit] 12/10/12 10:36:10 INFO ipc.Server: Stopping server on 48479
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 0 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 1 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 2 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 3 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 5 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 4 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 7 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 8 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 6 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: IPC Server handler 9 on 48479: exiting
    [junit] 12/10/12 10:36:10 INFO ipc.Server: Stopping IPC Server listener on 48479
    [junit] 12/10/12 10:36:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/12 10:36:10 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 499.214 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/12 10:36:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/12 10:36:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/12 10:36:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/12 10:36:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/12 10:36:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/12 10:36:11 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/12 10:36:11 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.242 sec
   [delete] Deleting directory /tmp/pig_junit_tmp703839347

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 49 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1332

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1332/changes>

Changes:

[jcoveney] PIG-2961: BinInterSedesRawComparator broken by TUPLE_number patch (jcoveney)

------------------------------------------
[...truncated 35778 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 2
    [junit] 12/10/08 22:32:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 22:32:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 22:32:21 INFO ipc.Server: Stopping server on 39635
    [junit] 12/10/08 22:32:21 INFO ipc.Server: IPC Server handler 0 on 39635: exiting
    [junit] 12/10/08 22:32:21 INFO ipc.Server: IPC Server handler 1 on 39635: exiting
    [junit] 12/10/08 22:32:21 INFO ipc.Server: IPC Server handler 2 on 39635: exiting
    [junit] 12/10/08 22:32:21 INFO ipc.Server: Stopping IPC Server listener on 39635
    [junit] 12/10/08 22:32:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:21 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 22:32:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 22:32:21 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:41457, storageID=DS-872114770-67.195.138.20-41457-1349735058522, infoPort=56463, ipcPort=39635):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 22:32:21 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 22:32:22 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:43950 to delete  blk_7651325145052526712_1133 blk_2563143186939838473_1127 blk_-5649663348552015441_1134 blk_-8741414859741998132_1134
    [junit] 12/10/08 22:32:22 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:41457 to delete  blk_7651325145052526712_1133 blk_2563143186939838473_1127 blk_-5649663348552015441_1134 blk_-8741414859741998132_1134
    [junit] 12/10/08 22:32:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 22:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:22 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:41457, storageID=DS-872114770-67.195.138.20-41457-1349735058522, infoPort=56463, ipcPort=39635):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/08 22:32:22 INFO ipc.Server: Stopping server on 39635
    [junit] 12/10/08 22:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:22 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] Shutting down DataNode 1
    [junit] 12/10/08 22:32:22 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/08 22:32:22 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId947030558
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId947030558
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 22:32:22 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 22:32:22 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 22:32:22 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121008222418964_0012
    [junit] 12/10/08 22:32:22 WARN mapred.TaskTracker: Unknown job job_20121008222418964_0012 being deleted.
    [junit] 12/10/08 22:32:22 INFO ipc.Server: Stopping server on 47211
    [junit] 12/10/08 22:32:22 INFO ipc.Server: IPC Server handler 0 on 47211: exiting
    [junit] 12/10/08 22:32:22 INFO ipc.Server: Stopping IPC Server listener on 47211
    [junit] 12/10/08 22:32:22 INFO ipc.Server: IPC Server handler 2 on 47211: exiting
    [junit] 12/10/08 22:32:22 INFO ipc.Server: IPC Server handler 1 on 47211: exiting
    [junit] 12/10/08 22:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:22 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 22:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 22:32:22 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:43950, storageID=DS-70303555-67.195.138.20-43950-1349735058137, infoPort=55986, ipcPort=47211):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 22:32:22 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 22:32:23 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121008222418964_0012
    [junit] 12/10/08 22:32:23 WARN mapred.TaskTracker: Unknown job job_20121008222418964_0012 being deleted.
    [junit] 12/10/08 22:32:23 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 22:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:23 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:43950, storageID=DS-70303555-67.195.138.20-43950-1349735058137, infoPort=55986, ipcPort=47211):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/08 22:32:23 INFO ipc.Server: Stopping server on 47211
    [junit] 12/10/08 22:32:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:23 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/08 22:32:23 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/08 22:32:23 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1749715165
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1749715165
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] Shutting down DataNode 0
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 22:32:23 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 22:32:23 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 22:32:23 INFO ipc.Server: Stopping server on 47713
    [junit] 12/10/08 22:32:23 INFO ipc.Server: IPC Server handler 2 on 47713: exiting
    [junit] 12/10/08 22:32:23 INFO ipc.Server: IPC Server handler 0 on 47713: exiting
    [junit] 12/10/08 22:32:23 INFO ipc.Server: IPC Server handler 1 on 47713: exiting
    [junit] 12/10/08 22:32:23 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 22:32:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 22:32:23 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:43729, storageID=DS-1364106718-67.195.138.20-43729-1349735057744, infoPort=33062, ipcPort=47713):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 22:32:23 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 22:32:23 INFO ipc.Server: Stopping IPC Server listener on 47713
    [junit] 12/10/08 22:32:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 22:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:43729, storageID=DS-1364106718-67.195.138.20-43729-1349735057744, infoPort=33062, ipcPort=47713):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/08 22:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/08 22:32:24 INFO ipc.Server: Stopping server on 47713
    [junit] 12/10/08 22:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 22:32:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/08 22:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/08 22:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId10276099
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId10276099
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 22:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 22:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 22:32:25 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/08 22:32:25 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 9Number of transactions batched in Syncs: 126 Number of syncs: 432 SyncTimes(ms): 6359 602 
    [junit] 12/10/08 22:32:25 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/08 22:32:25 INFO ipc.Server: Stopping server on 52088
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 0 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 1 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 4 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 5 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 2 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 6 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 3 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 8 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 7 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: IPC Server handler 9 on 52088: exiting
    [junit] 12/10/08 22:32:25 INFO ipc.Server: Stopping IPC Server listener on 52088
    [junit] 12/10/08 22:32:25 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 22:32:25 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 480.557 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/08 22:32:26 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/08 22:32:26 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/08 22:32:26 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/08 22:32:26 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/08 22:32:26 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/08 22:32:26 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/08 22:32:26 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.243 sec
   [delete] Deleting directory /tmp/pig_junit_tmp254501787

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1331

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1331/changes>

Changes:

[gdfm] PIG-2946: Documentation of "history" and "clear" commands (xalan via azaroth)

------------------------------------------
[...truncated 37035 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 2
    [junit] 12/10/08 10:32:19 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 10:32:19 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 10:32:20 INFO ipc.Server: Stopping server on 43624
    [junit] 12/10/08 10:32:20 INFO ipc.Server: IPC Server handler 0 on 43624: exiting
    [junit] 12/10/08 10:32:20 INFO ipc.Server: IPC Server handler 1 on 43624: exiting
    [junit] 12/10/08 10:32:20 INFO ipc.Server: Stopping IPC Server listener on 43624
    [junit] 12/10/08 10:32:20 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:20 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 10:32:20 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 10:32:20 INFO ipc.Server: IPC Server handler 2 on 43624: exiting
    [junit] 12/10/08 10:32:20 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:43255, storageID=DS-1389027340-67.195.138.20-43255-1349691845280, infoPort=39859, ipcPort=43624):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 10:32:20 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 10:32:20 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 10:32:20 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:43255, storageID=DS-1389027340-67.195.138.20-43255-1349691845280, infoPort=39859, ipcPort=43624):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/08 10:32:20 INFO ipc.Server: Stopping server on 43624
    [junit] 12/10/08 10:32:20 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:20 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 10:32:20 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/08 10:32:20 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/08 10:32:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] Shutting down DataNode 1
    [junit] 12/10/08 10:32:21 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1623552745
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1623552745
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 10:32:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 10:32:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 10:32:21 INFO ipc.Server: Stopping server on 50419
    [junit] 12/10/08 10:32:21 INFO ipc.Server: Stopping IPC Server listener on 50419
    [junit] 12/10/08 10:32:21 INFO ipc.Server: IPC Server handler 2 on 50419: exiting
    [junit] 12/10/08 10:32:21 INFO ipc.Server: IPC Server handler 0 on 50419: exiting
    [junit] 12/10/08 10:32:21 INFO ipc.Server: IPC Server handler 1 on 50419: exiting
    [junit] 12/10/08 10:32:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 10:32:21 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 10:32:21 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:56529, storageID=DS-1252201617-67.195.138.20-56529-1349691844895, infoPort=60459, ipcPort=50419):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 10:32:21 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 10:32:21 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:56529 to delete  blk_-4578268783795232705_1134 blk_6709459043720732275_1134 blk_3746414447636786443_1127 blk_-429034783813444838_1133
    [junit] 12/10/08 10:32:21 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:48241 to delete  blk_-4578268783795232705_1134 blk_3746414447636786443_1127 blk_-429034783813444838_1133
    [junit] 12/10/08 10:32:21 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121008102405700_0012
    [junit] 12/10/08 10:32:21 WARN mapred.TaskTracker: Unknown job job_20121008102405700_0012 being deleted.
    [junit] 12/10/08 10:32:21 INFO datanode.DataBlockScanner: Verification succeeded for blk_-6357657632426518875_1001
    [junit] 12/10/08 10:32:21 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:56529, storageID=DS-1252201617-67.195.138.20-56529-1349691844895, infoPort=60459, ipcPort=50419):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/08 10:32:22 INFO ipc.Server: Stopping server on 50419
    [junit] 12/10/08 10:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 10:32:22 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/08 10:32:22 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/10/08 10:32:22 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-485706938
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-485706938
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 10:32:22 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 10:32:22 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 10:32:22 INFO ipc.Server: Stopping server on 46733
    [junit] 12/10/08 10:32:22 INFO ipc.Server: Stopping IPC Server listener on 46733
    [junit] 12/10/08 10:32:22 INFO ipc.Server: IPC Server handler 1 on 46733: exiting
    [junit] 12/10/08 10:32:22 INFO ipc.Server: IPC Server handler 2 on 46733: exiting
    [junit] 12/10/08 10:32:22 INFO ipc.Server: IPC Server handler 0 on 46733: exiting
    [junit] 12/10/08 10:32:22 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 10:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/08 10:32:22 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:59545, storageID=DS-1284226331-67.195.138.20-59545-1349691844452, infoPort=35828, ipcPort=46733):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/08 10:32:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:59545, storageID=DS-1284226331-67.195.138.20-59545-1349691844452, infoPort=35828, ipcPort=46733):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/08 10:32:22 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/08 10:32:22 INFO ipc.Server: Stopping server on 46733
    [junit] 12/10/08 10:32:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/08 10:32:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 10:32:22 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/08 10:32:22 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/08 10:32:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/08 10:32:23 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-530204830
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-530204830
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/08 10:32:23 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/08 10:32:23 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/08 10:32:23 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/08 10:32:23 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/08 10:32:23 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 12Number of transactions batched in Syncs: 133 Number of syncs: 432 SyncTimes(ms): 7434 296 
    [junit] 12/10/08 10:32:23 INFO ipc.Server: Stopping server on 40807
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 0 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 4 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 1 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 5 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 3 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 7 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 6 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 9 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 8 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: IPC Server handler 2 on 40807: exiting
    [junit] 12/10/08 10:32:23 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/08 10:32:23 INFO ipc.Server: Stopping IPC Server listener on 40807
    [junit] 12/10/08 10:32:23 INFO metrics.RpcInstrumentation: shut down
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 492.131 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/08 10:32:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/08 10:32:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/08 10:32:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/08 10:32:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/08 10:32:24 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/08 10:32:24 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/08 10:32:24 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.251 sec
   [delete] Deleting directory /tmp/pig_junit_tmp901720824

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 3 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1330

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1330/changes>

Changes:

[jcoveney] PIG-2877: Make SchemaTuple work in foreach (and thus, in loads) (jcoveney) -- add PigConstants.java

------------------------------------------
[...truncated 36497 lines...]
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/06 10:32:05 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/10/06 10:32:05 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/06 10:32:05 INFO ipc.Server: Stopping server on 36813
    [junit] 12/10/06 10:32:05 INFO ipc.Server: IPC Server handler 0 on 36813: exiting
    [junit] 12/10/06 10:32:05 INFO ipc.Server: IPC Server handler 1 on 36813: exiting
    [junit] 12/10/06 10:32:05 INFO ipc.Server: IPC Server handler 2 on 36813: exiting
    [junit] 12/10/06 10:32:05 INFO ipc.Server: Stopping IPC Server listener on 36813
    [junit] 12/10/06 10:32:05 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/06 10:32:05 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:05 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/06 10:32:05 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:36019, storageID=DS-53916008-67.195.138.20-36019-1349519047342, infoPort=43256, ipcPort=36813):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/06 10:32:05 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/06 10:32:06 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/06 10:32:06 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121006102407777_0012
    [junit] 12/10/06 10:32:06 WARN mapred.TaskTracker: Unknown job job_20121006102407777_0012 being deleted.
    [junit] 12/10/06 10:32:06 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:06 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:36019, storageID=DS-53916008-67.195.138.20-36019-1349519047342, infoPort=43256, ipcPort=36813):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/06 10:32:06 INFO ipc.Server: Stopping server on 36813
    [junit] 12/10/06 10:32:06 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:06 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:06 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/06 10:32:06 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/06 10:32:06 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-440464043
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-440464043
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/06 10:32:06 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/10/06 10:32:07 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/06 10:32:07 INFO ipc.Server: Stopping server on 55984
    [junit] 12/10/06 10:32:07 INFO ipc.Server: IPC Server handler 0 on 55984: exiting
    [junit] 12/10/06 10:32:07 INFO ipc.Server: Stopping IPC Server listener on 55984
    [junit] 12/10/06 10:32:07 INFO ipc.Server: IPC Server handler 2 on 55984: exiting
    [junit] 12/10/06 10:32:07 INFO ipc.Server: IPC Server handler 1 on 55984: exiting
    [junit] 12/10/06 10:32:07 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:07 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/06 10:32:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/06 10:32:07 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:53472, storageID=DS-1408047884-67.195.138.20-53472-1349519046962, infoPort=35497, ipcPort=55984):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/06 10:32:07 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/06 10:32:07 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:53472, storageID=DS-1408047884-67.195.138.20-53472-1349519046962, infoPort=35497, ipcPort=55984):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/06 10:32:07 INFO ipc.Server: Stopping server on 55984
    [junit] 12/10/06 10:32:07 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:07 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/06 10:32:07 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/06 10:32:07 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/06 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:08 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1712084962
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1712084962
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/06 10:32:08 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/10/06 10:32:08 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/06 10:32:08 INFO ipc.Server: Stopping server on 36622
    [junit] 12/10/06 10:32:08 INFO ipc.Server: IPC Server handler 0 on 36622: exiting
    [junit] 12/10/06 10:32:08 INFO ipc.Server: IPC Server handler 1 on 36622: exiting
    [junit] 12/10/06 10:32:08 INFO ipc.Server: Stopping IPC Server listener on 36622
    [junit] 12/10/06 10:32:08 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/06 10:32:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:08 INFO ipc.Server: IPC Server handler 2 on 36622: exiting
    [junit] 12/10/06 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/06 10:32:08 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:53424, storageID=DS-1922814871-67.195.138.20-53424-1349519046573, infoPort=58923, ipcPort=36622):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/06 10:32:08 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/06 10:32:08 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:53424 to delete  blk_4575107253183800548_1127 blk_923726920449756119_1134 blk_8125950019483240227_1134
    [junit] 12/10/06 10:32:08 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:43521 to delete  blk_4880337137960515186_1133 blk_4575107253183800548_1127 blk_8125950019483240227_1134
    [junit] 12/10/06 10:32:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/06 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:09 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:53424, storageID=DS-1922814871-67.195.138.20-53424-1349519046573, infoPort=58923, ipcPort=36622):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/06 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/06 10:32:09 INFO ipc.Server: Stopping server on 36622
    [junit] 12/10/06 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/06 10:32:09 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/06 10:32:09 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/06 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId700967896
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId700967896
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/06 10:32:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/06 10:32:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/06 10:32:09 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/06 10:32:09 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/06 10:32:09 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 15 Number of transactions batched in Syncs: 126 Number of syncs: 432 SyncTimes(ms): 5082 247 
    [junit] 12/10/06 10:32:09 INFO ipc.Server: Stopping server on 48263
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 0 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 2 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 1 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 3 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 4 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 5 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 6 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 7 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 8 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: IPC Server handler 9 on 48263: exiting
    [junit] 12/10/06 10:32:09 INFO ipc.Server: Stopping IPC Server listener on 48263
    [junit] 12/10/06 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/06 10:32:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 475.992 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/06 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/06 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/06 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/06 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/06 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/06 10:32:10 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/06 10:32:10 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.24 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1927619760

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 19 minutes 49 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1329

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1329/changes>

Changes:

[gates] PIG-2932 Setting high default_parallel causes IOException in local mode

[jcoveney] PIG-2877: Make SchemaTuple work in foreach (and thus, in loads) (jcoveney)

------------------------------------------
[...truncated 2772 lines...]
	---------------------------------------------------------------------

ivy-compile:
[ivy:retrieve] :: retrieving :: org.apache.pig#pig
[ivy:retrieve] 	confs: [compile]
[ivy:retrieve] 	69 artifacts copied, 0 already retrieved (62857kB/2750ms)
[ivy:cachepath] DEPRECATED: 'ivy.conf.file' is deprecated, use 'ivy.settings.file' instead
[ivy:cachepath] :: loading settings :: file = <https://builds.apache.org/job/Pig-trunk/ws/trunk/ivy/ivysettings.xml>

init:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/impl/logicalLayer/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/tools/pigscript/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/tools/parameters>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/classes>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/data/parser>
     [move] Moving 1 file to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/ivy/lib/Pig>

cc-compile:
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/pigscript/parser/PigScriptParser.jj> . . .
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "JavaCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated successfully.
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/parameters/PigFileParser.jj> . . .
   [javacc] Warning: Lookahead adequacy checking not being performed since option LOOKAHEAD is more than 1.  Set option FORCE_LA_CHECK to true to force checking.
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "JavaCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated with 0 errors and 1 warnings.
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/parameters/ParamLoader.jj> . . .
   [javacc] File "TokenMgrError.java" is being rebuilt.
   [javacc] File "ParseException.java" is being rebuilt.
   [javacc] File "Token.java" is being rebuilt.
   [javacc] File "JavaCharStream.java" is being rebuilt.
   [javacc] Parser generated successfully.
   [jjtree] Java Compiler Compiler Version 4.2 (Tree Builder)
   [jjtree] (type "jjtree" with no arguments for help)
   [jjtree] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/DOTParser.jjt> . . .
   [jjtree] File "Node.java" does not exist.  Will create one.
   [jjtree] File "SimpleNode.java" does not exist.  Will create one.
   [jjtree] File "DOTParserTreeConstants.java" does not exist.  Will create one.
   [jjtree] File "JJTDOTParserState.java" does not exist.  Will create one.
   [jjtree] Annotated grammar generated successfully in <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser/DOTParser.jj>
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser/DOTParser.jj> . . .
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "SimpleCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated successfully.

prepare:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/parser>

genLexer:

genParser:

genTreeParser:

gen:

compile:
     [echo] *** Building Main Sources ***
     [echo] *** To compile with all warnings enabled, supply -Dall.warnings=1 on command line ***
     [echo] *** If all.warnings property is supplied, compile-sources-all-warnings target will be executed ***
     [echo] *** Else, compile-sources (which only warns about deprecations) target will be executed ***

compile-sources:
    [javac] Compiling 786 source files to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes>
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:21: cannot find symbol
    [javac] symbol  : class PigConstants
    [javac] location: package org.apache.pig
    [javac] import static org.apache.pig.PigConstants.SCHEMA_TUPLE_ON_BY_DEFAULT;
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:21: static import only from classes and interfaces
    [javac] import static org.apache.pig.PigConstants.SCHEMA_TUPLE_ON_BY_DEFAULT;
    [javac] ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:37: cannot find symbol
    [javac] symbol  : class PigConstants
    [javac] location: package org.apache.pig
    [javac] import org.apache.pig.PigConstants;
    [javac]                      ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:21: cannot find symbol
    [javac] symbol  : class PigConstants
    [javac] location: package org.apache.pig
    [javac] import static org.apache.pig.PigConstants.GENERATED_CLASSES_KEY;
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:21: static import only from classes and interfaces
    [javac] import static org.apache.pig.PigConstants.GENERATED_CLASSES_KEY;
    [javac] ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:22: cannot find symbol
    [javac] symbol  : class PigConstants
    [javac] location: package org.apache.pig
    [javac] import static org.apache.pig.PigConstants.LOCAL_CODE_DIR;
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:22: static import only from classes and interfaces
    [javac] import static org.apache.pig.PigConstants.LOCAL_CODE_DIR;
    [javac] ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:23: cannot find symbol
    [javac] symbol  : class PigConstants
    [javac] location: package org.apache.pig
    [javac] import static org.apache.pig.PigConstants.SCHEMA_TUPLE_ON_BY_DEFAULT;
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:23: static import only from classes and interfaces
    [javac] import static org.apache.pig.PigConstants.SCHEMA_TUPLE_ON_BY_DEFAULT;
    [javac] ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:76: cannot find symbol
    [javac] symbol  : variable PigConstants
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]             String localCodeDir = jConf.get(PigConstants.LOCAL_CODE_DIR);
    [javac]                                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:82: cannot find symbol
    [javac] symbol  : variable PigConstants
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]             codeDir = new File(jConf.get(PigConstants.LOCAL_CODE_DIR));
    [javac]                                          ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:154: cannot find symbol
    [javac] symbol  : variable SCHEMA_TUPLE_ON_BY_DEFAULT
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]         if (!jConf.getBoolean(SHOULD_USE_SCHEMA_TUPLE, SCHEMA_TUPLE_ON_BY_DEFAULT)) {
    [javac]                                                        ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:177: cannot find symbol
    [javac] symbol  : variable PigConstants
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]         String toDeserialize = jConf.get(PigConstants.GENERATED_CLASSES_KEY);
    [javac]                                          ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:179: cannot find symbol
    [javac] symbol  : variable PigConstants
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]             LOG.info("No classes in in key [" + PigConstants.GENERATED_CLASSES_KEY + "] to copy from distributed cache.");
    [javac]                                                 ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleBackend.java>:182: cannot find symbol
    [javac] symbol  : variable PigConstants
    [javac] location: class org.apache.pig.data.SchemaTupleBackend
    [javac]         LOG.info("Copying files in key ["+PigConstants.GENERATED_CLASSES_KEY+"] from distributed cache: " + toDeserialize);
    [javac]                                           ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:117: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]                         + LOCAL_CODE_DIR + "] with code temp directory: " + codePath);
    [javac]                           ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:118: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]                     conf.set(LOCAL_CODE_DIR, codePath);
    [javac]                              ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:123: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]                 conf.set(LOCAL_CODE_DIR, codePath);
    [javac]                          ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:167: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]             LOG.info("Setting key [" + GENERATED_CLASSES_KEY + "] with classes to deserialize [" + toSer + "]");
    [javac]                                        ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:169: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]             conf.set(GENERATED_CLASSES_KEY, toSer);
    [javac]                      ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:179: cannot find symbol
    [javac] symbol  : variable SCHEMA_TUPLE_ON_BY_DEFAULT
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend.SchemaTupleFrontendGenHelper
    [javac]             if (!conf.getBoolean(SHOULD_USE_SCHEMA_TUPLE, SCHEMA_TUPLE_ON_BY_DEFAULT)) {
    [javac]                                                           ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:233: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]                 prop.remove(GENERATED_CLASSES_KEY);
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:234: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]                 prop.remove(LOCAL_CODE_DIR);
    [javac]                             ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:281: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]         String value = conf.get(GENERATED_CLASSES_KEY);
    [javac]                                 ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:283: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]             prop.setProperty(GENERATED_CLASSES_KEY, value);
    [javac]                              ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:285: cannot find symbol
    [javac] symbol  : variable GENERATED_CLASSES_KEY
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]             prop.remove(GENERATED_CLASSES_KEY);
    [javac]                         ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:287: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]         value = conf.get(LOCAL_CODE_DIR);
    [javac]                          ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:289: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]             prop.setProperty(LOCAL_CODE_DIR, value);
    [javac]                              ^
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/data/SchemaTupleFrontend.java>:291: cannot find symbol
    [javac] symbol  : variable LOCAL_CODE_DIR
    [javac] location: class org.apache.pig.data.SchemaTupleFrontend
    [javac]             prop.remove(LOCAL_CODE_DIR);
    [javac]                         ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] Note: Some input files use unchecked or unsafe operations.
    [javac] Note: Recompile with -Xlint:unchecked for details.
    [javac] 29 errors

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:451: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:503: Compile failed; see the compiler error output for details.

Total time: 36 seconds
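
Note on the 29 compiler errors above: they all point at the same root cause. The PIG-2877 commit in this build (#1329) references org.apache.pig.PigConstants from SchemaTupleBackend and SchemaTupleFrontend, but PigConstants.java itself appears not to have been checked in until the follow-up commit in build #1330 ("-- add PigConstants.java"). A minimal sketch of the class javac is looking for, with the constant names taken from the error output above and the values only illustrative placeholders (the real class added in #1330 may differ), would be:

    // Hypothetical reconstruction of the missing org.apache.pig.PigConstants,
    // based only on the symbols javac reports above; values are placeholders.
    package org.apache.pig;

    public final class PigConstants {

        private PigConstants() {
            // constants holder, not meant to be instantiated
        }

        // Job-conf key listing the generated SchemaTuple classes to ship
        // via the distributed cache (placeholder value).
        public static final String GENERATED_CLASSES_KEY = "pig.schematuple.classes";

        // Job-conf key naming the local directory where generated code is
        // written before being shipped to the backend (placeholder value).
        public static final String LOCAL_CODE_DIR = "pig.schematuple.local.dir";

        // Default for the SHOULD_USE_SCHEMA_TUPLE switch when the user has
        // not set it explicitly (placeholder value).
        public static final boolean SCHEMA_TUPLE_ON_BY_DEFAULT = false;
    }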


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1328

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1328/changes>

Changes:

[jcoveney] PIG-2737: [piggybank] TestIndexedStorage is failing, should be refactored (jcoveney)

[daijy] PIG-2944: ivysettings.xml does not let you override .m2/repository

------------------------------------------
[...truncated 35949 lines...]
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] Shutting down DataNode 2
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/04 22:59:00 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/04 22:59:00 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/04 22:59:00 INFO ipc.Server: Stopping server on 36826
    [junit] 12/10/04 22:59:00 INFO ipc.Server: IPC Server handler 0 on 36826: exiting
    [junit] 12/10/04 22:59:00 INFO ipc.Server: IPC Server handler 2 on 36826: exiting
    [junit] 12/10/04 22:59:00 INFO ipc.Server: Stopping IPC Server listener on 36826
    [junit] 12/10/04 22:59:00 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:00 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/04 22:59:00 INFO ipc.Server: IPC Server handler 1 on 36826: exiting
    [junit] 12/10/04 22:59:00 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/04 22:59:00 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:53526, storageID=DS-1699195994-67.195.138.20-53526-1349391046987, infoPort=57376, ipcPort=36826):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/04 22:59:00 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/04 22:59:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Scheduling block blk_-2159142538880085882_1133 file build/test/data/dfs/data/data2/current/blk_-2159142538880085882 for deletion
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Scheduling block blk_-1890238983732947252_1134 file build/test/data/dfs/data/data1/current/blk_-1890238983732947252 for deletion
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Deleted block blk_-2159142538880085882_1133 at file build/test/data/dfs/data/data2/current/blk_-2159142538880085882
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Scheduling block blk_6523039248380329171_1134 file build/test/data/dfs/data/data2/current/blk_6523039248380329171 for deletion
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Deleted block blk_6523039248380329171_1134 at file build/test/data/dfs/data/data2/current/blk_6523039248380329171
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Deleted block blk_-1890238983732947252_1134 at file build/test/data/dfs/data/data1/current/blk_-1890238983732947252
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:53526, storageID=DS-1699195994-67.195.138.20-53526-1349391046987, infoPort=57376, ipcPort=36826):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/04 22:59:01 INFO ipc.Server: Stopping server on 36826
    [junit] 12/10/04 22:59:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:01 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/04 22:59:01 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/04 22:59:01 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId885208781
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId885208781
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/04 22:59:01 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/04 22:59:01 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 1
    [junit] 12/10/04 22:59:01 INFO ipc.Server: Stopping server on 47680
    [junit] 12/10/04 22:59:01 INFO ipc.Server: IPC Server handler 1 on 47680: exiting
    [junit] 12/10/04 22:59:01 INFO ipc.Server: IPC Server handler 0 on 47680: exiting
    [junit] 12/10/04 22:59:01 INFO ipc.Server: Stopping IPC Server listener on 47680
    [junit] 12/10/04 22:59:01 INFO ipc.Server: IPC Server handler 2 on 47680: exiting
    [junit] 12/10/04 22:59:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/04 22:59:01 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/04 22:59:01 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:35745, storageID=DS-1496623399-67.195.138.20-35745-1349391046610, infoPort=41436, ipcPort=47680):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/04 22:59:01 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121004225047398_0012
    [junit] 12/10/04 22:59:01 WARN mapred.TaskTracker: Unknown job job_20121004225047398_0012 being deleted.
    [junit] 12/10/04 22:59:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:35745, storageID=DS-1496623399-67.195.138.20-35745-1349391046610, infoPort=41436, ipcPort=47680):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/04 22:59:01 INFO ipc.Server: Stopping server on 47680
    [junit] 12/10/04 22:59:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:01 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/04 22:59:01 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/04 22:59:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:02 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-124186707
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-124186707
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/04 22:59:02 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/04 22:59:02 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/04 22:59:02 INFO ipc.Server: Stopping server on 60794
    [junit] 12/10/04 22:59:02 INFO ipc.Server: IPC Server handler 1 on 60794: exiting
    [junit] 12/10/04 22:59:02 INFO ipc.Server: IPC Server handler 0 on 60794: exiting
    [junit] 12/10/04 22:59:02 INFO ipc.Server: Stopping IPC Server listener on 60794
    [junit] 12/10/04 22:59:02 INFO ipc.Server: IPC Server handler 2 on 60794: exiting
    [junit] 12/10/04 22:59:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:02 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/04 22:59:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/04 22:59:02 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49994, storageID=DS-298365707-67.195.138.20-49994-1349391046211, infoPort=58508, ipcPort=60794):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/04 22:59:02 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/04 22:59:02 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/04 22:59:02 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:35745 to delete  blk_6221835146902212862_1127 blk_6523039248380329171_1134 blk_-2159142538880085882_1133 blk_-1890238983732947252_1134
    [junit] 12/10/04 22:59:02 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:53526 to delete  blk_6221835146902212862_1127 blk_-2159142538880085882_1133 blk_-1890238983732947252_1134
    [junit] 12/10/04 22:59:03 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:03 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49994, storageID=DS-298365707-67.195.138.20-49994-1349391046211, infoPort=58508, ipcPort=60794):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/04 22:59:03 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/04 22:59:03 INFO ipc.Server: Stopping server on 60794
    [junit] 12/10/04 22:59:03 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:03 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/04 22:59:03 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/04 22:59:03 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/04 22:59:03 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1510288525
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1510288525
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/04 22:59:03 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/04 22:59:03 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/04 22:59:03 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/04 22:59:03 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 7Number of transactions batched in Syncs: 124 Number of syncs: 432 SyncTimes(ms): 7801 348 
    [junit] 12/10/04 22:59:03 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/04 22:59:03 INFO ipc.Server: Stopping server on 50739
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 3 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 1 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 0 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 5 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: Stopping IPC Server listener on 50739
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 7 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 9 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 6 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 2 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 4 on 50739: exiting
    [junit] 12/10/04 22:59:03 INFO ipc.Server: IPC Server handler 8 on 50739: exiting
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 490.924 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/04 22:59:04 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/04 22:59:04 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/04 22:59:04 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/04 22:59:04 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/04 22:59:04 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/04 22:59:04 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/04 22:59:04 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.229 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1608694859
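
The SUBSTRING, INDEXOF and LAST_INDEX_OF warnings above come from the builtin string UDFs being handed null or out-of-range offsets by the test inputs; the UDFs log the problem and return null rather than kill the task. As a rough standalone illustration (plain java.lang.String only, not the actual org.apache.pig.builtin.SUBSTRING source), an end index smaller than the begin index is exactly what produces a message like "String index out of range: -2":

    // Hypothetical sketch; the class and method names are not from the Pig code base.
    public class SubstringWarningSketch {
        // Mimics the WARN-and-return-null behaviour visible in the TestStringUDFs output.
        static String safeSubstring(String s, int begin, int end) {
            try {
                return s.substring(begin, Math.min(end, s.length()));
            } catch (StringIndexOutOfBoundsException e) {
                // substring(5, 3) reports "String index out of range: -2" (end - begin).
                System.err.println("WARN SUBSTRING: " + e.getMessage());
                return null;
            }
        }

        public static void main(String[] args) {
            System.out.println(safeSubstring("pig", 1, 2)); // "i"
            System.out.println(safeSubstring("pig", 5, 9)); // null, after logging the warning
        }
    }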

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 21 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1327

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1327/changes>

Changes:

[dvryaboy] PIG-2935: Catch NoSuchMethodError when StoreFuncInterface's new cleanupOnSuccess method isn't implemented.
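
The PIG-2935 change above is a binary-compatibility guard: a StoreFunc compiled before cleanupOnSuccess was added to StoreFuncInterface can surface a NoSuchMethodError when the new method is invoked, and the caller catches it instead of failing the job. A minimal sketch of that general catch-and-continue pattern follows; the interface and class names (and the method signature shown) are illustrative, not Pig's actual API, and the error itself only appears in a mixed-version deployment, so a self-contained demo cannot trigger it:

    // Sketch of the pattern only; CompatStoreCaller and NewStoreApi are made-up names.
    public class CompatStoreCaller {
        interface NewStoreApi {
            // Stands in for an interface that gained a method after older
            // implementations were already compiled and deployed.
            void cleanupOnSuccess(String location);
        }

        static void cleanupQuietly(NewStoreApi storer, String location) {
            try {
                storer.cleanupOnSuccess(location);
            } catch (NoSuchMethodError e) {
                // Implementation predates the method: skip cleanup instead of failing.
            } catch (AbstractMethodError e) {
                // Same incompatibility surfaced through interface dispatch.
            }
        }

        public static void main(String[] args) {
            NewStoreApi modern = new NewStoreApi() {
                public void cleanupOnSuccess(String location) {
                    System.out.println("cleaning up " + location);
                }
            };
            cleanupQuietly(modern, "/tmp/out"); // happy path; no error to catch here
        }
    }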

------------------------------------------
[...truncated 6887 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 48
 [findbugs] Missing classes: 284
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
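
For reference, the exit-code lines above are a bitmask: the wrapper sets flag 1 because warnings were found and flag 2 because classes needed for analysis were missing, and the final code is their bitwise OR, which this run tolerates (the task still ends in BUILD SUCCESSFUL below). A one-line illustration:

    // The numbers printed in parentheses are bit flags; the exit code is their OR.
    public class FindbugsExitCode {
        static final int BUGS_FOUND_FLAG = 1;     // "Setting 'bugs found' flag (1)"
        static final int MISSING_CLASS_FLAG = 2;  // "Setting 'missing class' flag (2)"

        public static void main(String[] args) {
            int exitCode = BUGS_FOUND_FLAG | MISSING_CLASS_FLAG;
            System.out.println("Exit code set to: " + exitCode); // prints 3
        }
    }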
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 6 minutes 33 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1326

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1326/changes>

Changes:

[gdfm] PIG-2920: e2e tests override PERL5LIB environment variable (azaroth)

[dvryaboy] PIG-2917: SpillableMemoryManager memory leak for WeakReference

[dvryaboy] PIG-2938: All unit tests that use MR2 MiniCluster are broken in trunk
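
PIG-2917 above, like PIG-2923 in the #1325 change list further down, concerns how bags are tracked by the SpillableMemoryManager: holding strong references leaks bags that are otherwise dead, and registering every bag eagerly tracks many that never grow large enough to matter. A rough sketch of the underlying idea, using made-up names rather than Pig's actual manager, combines WeakReferences with registration deferred until a bag is worth spilling:

    import java.lang.ref.WeakReference;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    // Illustrative only; WeakSpillRegistry and Spillable here are not Pig's real types.
    public class WeakSpillRegistry {
        interface Spillable { long spill(); }

        private final List<WeakReference<Spillable>> registered =
                new ArrayList<WeakReference<Spillable>>();

        // Called lazily, i.e. only once a bag has grown enough to be worth tracking.
        public synchronized void register(Spillable s) {
            registered.add(new WeakReference<Spillable>(s));
        }

        // Under memory pressure: spill live entries and drop references the GC has
        // already cleared, so the registry itself never pins dead bags in memory.
        public synchronized long spillAll() {
            long freed = 0;
            for (Iterator<WeakReference<Spillable>> it = registered.iterator(); it.hasNext();) {
                Spillable s = it.next().get();
                if (s == null) {
                    it.remove();
                } else {
                    freed += s.spill();
                }
            }
            return freed;
        }
    }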

------------------------------------------
[...truncated 35743 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/01 22:33:41 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/01 22:33:41 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping server on 33143
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping IPC Server listener on 33143
    [junit] 12/10/01 22:33:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 0 on 33143: exiting
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 1 on 33143: exiting
    [junit] 12/10/01 22:33:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:39829, storageID=DS-1379621250-67.195.138.20-39829-1349130333086, infoPort=34321, ipcPort=33143):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 2 on 33143: exiting
    [junit] 12/10/01 22:33:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:39829, storageID=DS-1379621250-67.195.138.20-39829-1349130333086, infoPort=34321, ipcPort=33143):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping server on 33143
    [junit] 12/10/01 22:33:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/01 22:33:42 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/01 22:33:42 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/01 22:33:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1634178828
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1634178828
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/01 22:33:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/01 22:33:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping server on 55775
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 0 on 55775: exiting
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 2 on 55775: exiting
    [junit] 12/10/01 22:33:42 INFO ipc.Server: IPC Server handler 1 on 55775: exiting
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/01 22:33:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:42 INFO ipc.Server: Stopping IPC Server listener on 55775
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/01 22:33:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34506, storageID=DS-1640619591-67.195.138.20-34506-1349130332689, infoPort=56236, ipcPort=55775):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/01 22:33:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/01 22:33:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/01 22:33:42 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:39829 to delete  blk_2401882474519463549_1133 blk_-8025263235855048886_1134
    [junit] 12/10/01 22:33:42 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:35139 to delete  blk_2038525461950479832_1127 blk_2401882474519463549_1133 blk_-8025263235855048886_1134 blk_-7083562877273353023_1134
    [junit] 12/10/01 22:33:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/01 22:33:43 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34506, storageID=DS-1640619591-67.195.138.20-34506-1349130332689, infoPort=56236, ipcPort=55775):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/10/01 22:33:43 INFO ipc.Server: Stopping server on 55775
    [junit] 12/10/01 22:33:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/01 22:33:43 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/01 22:33:43 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/01 22:33:43 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1299723969
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1299723969
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] Shutting down DataNode 0
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/01 22:33:43 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/01 22:33:43 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/01 22:33:43 INFO ipc.Server: Stopping server on 56084
    [junit] 12/10/01 22:33:43 INFO ipc.Server: IPC Server handler 0 on 56084: exiting
    [junit] 12/10/01 22:33:43 INFO ipc.Server: IPC Server handler 2 on 56084: exiting
    [junit] 12/10/01 22:33:43 INFO ipc.Server: IPC Server handler 1 on 56084: exiting
    [junit] 12/10/01 22:33:43 INFO ipc.Server: Stopping IPC Server listener on 56084
    [junit] 12/10/01 22:33:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:43 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/01 22:33:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/10/01 22:33:43 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60376, storageID=DS-2118569834-67.195.138.20-60376-1349130332289, infoPort=59368, ipcPort=56084):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/10/01 22:33:43 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/10/01 22:33:43 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/10/01 22:33:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/01 22:33:44 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60376, storageID=DS-2118569834-67.195.138.20-60376-1349130332289, infoPort=59368, ipcPort=56084):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/10/01 22:33:44 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/01 22:33:44 INFO ipc.Server: Stopping server on 56084
    [junit] 12/10/01 22:33:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/10/01 22:33:44 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/10/01 22:33:44 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/10/01 22:33:44 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1328322703
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1328322703
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/10/01 22:33:44 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/10/01 22:33:44 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/10/01 22:33:44 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/10/01 22:33:44 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/10/01 22:33:44 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 9Number of transactions batched in Syncs: 131 Number of syncs: 432 SyncTimes(ms): 6576 274 
    [junit] 12/10/01 22:33:44 INFO ipc.Server: Stopping server on 55138
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 0 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 1 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 3 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 6 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 5 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 7 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 9 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 2 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/10/01 22:33:44 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 4 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: IPC Server handler 8 on 55138: exiting
    [junit] 12/10/01 22:33:44 INFO ipc.Server: Stopping IPC Server listener on 55138
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 485.324 sec
    [junit] 12/10/01 22:33:44 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20121001222533526_0012
    [junit] 12/10/01 22:33:44 WARN mapred.TaskTracker: Unknown job job_20121001222533526_0012 being deleted.
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/10/01 22:33:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/10/01 22:33:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/01 22:33:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/10/01 22:33:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/10/01 22:33:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/10/01 22:33:45 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/10/01 22:33:45 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.24 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1549906922

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 20 minutes 3 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1325

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1325/changes>

Changes:

[dvryaboy] PIG-2923: Lazily register bags with SpillableMemoryManager

------------------------------------------
[...truncated 36623 lines...]
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/28 22:53:48 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/09/28 22:53:48 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/28 22:53:48 INFO ipc.Server: Stopping server on 36326
    [junit] 12/09/28 22:53:48 INFO ipc.Server: IPC Server handler 0 on 36326: exiting
    [junit] 12/09/28 22:53:48 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/28 22:53:48 INFO ipc.Server: IPC Server handler 2 on 36326: exiting
    [junit] 12/09/28 22:53:48 INFO ipc.Server: IPC Server handler 1 on 36326: exiting
    [junit] 12/09/28 22:53:48 INFO ipc.Server: Stopping IPC Server listener on 36326
    [junit] 12/09/28 22:53:48 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:48 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/28 22:53:48 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:56110, storageID=DS-1197903393-67.195.138.24-56110-1348872084901, infoPort=46246, ipcPort=36326):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/28 22:53:48 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/28 22:53:48 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120928224126090_0012
    [junit] 12/09/28 22:53:48 WARN mapred.TaskTracker: Unknown job job_20120928224126090_0012 being deleted.
    [junit] 12/09/28 22:53:49 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:56110, storageID=DS-1197903393-67.195.138.24-56110-1348872084901, infoPort=46246, ipcPort=36326):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/09/28 22:53:49 INFO ipc.Server: Stopping server on 36326
    [junit] 12/09/28 22:53:49 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:49 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/28 22:53:49 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/28 22:53:49 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/28 22:53:49 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120928224126090_0012
    [junit] 12/09/28 22:53:49 WARN mapred.TaskTracker: Unknown job job_20120928224126090_0012 being deleted.
    [junit] 12/09/28 22:53:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:49 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1969685761
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1969685761
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/28 22:53:49 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/09/28 22:53:49 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/28 22:53:49 INFO ipc.Server: Stopping server on 50828
    [junit] 12/09/28 22:53:49 INFO ipc.Server: IPC Server handler 0 on 50828: exiting
    [junit] 12/09/28 22:53:49 INFO ipc.Server: IPC Server handler 1 on 50828: exiting
    [junit] 12/09/28 22:53:49 INFO ipc.Server: IPC Server handler 2 on 50828: exiting
    [junit] 12/09/28 22:53:49 INFO ipc.Server: Stopping IPC Server listener on 50828
    [junit] 12/09/28 22:53:49 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:49 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/28 22:53:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/28 22:53:49 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46729, storageID=DS-1661774088-67.195.138.24-46729-1348872084031, infoPort=38454, ipcPort=50828):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/28 22:53:49 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/28 22:53:50 INFO datanode.DataNode: Scheduling block blk_1567366168343469810_1134 file build/test/data/dfs/data/data2/current/blk_1567366168343469810 for deletion
    [junit] 12/09/28 22:53:50 INFO datanode.DataNode: Deleted block blk_1567366168343469810_1134 at file build/test/data/dfs/data/data2/current/blk_1567366168343469810
    [junit] 12/09/28 22:53:50 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/28 22:53:50 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:47396 to delete  blk_-1298540438071152972_1127 blk_4906578808599907683_1134 blk_1567366168343469810_1134
    [junit] 12/09/28 22:53:50 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:46729 to delete  blk_7377763408698589741_1133
    [junit] 12/09/28 22:53:50 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:50 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46729, storageID=DS-1661774088-67.195.138.24-46729-1348872084031, infoPort=38454, ipcPort=50828):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/09/28 22:53:50 INFO ipc.Server: Stopping server on 50828
    [junit] 12/09/28 22:53:50 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:50 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:50 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/28 22:53:50 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/28 22:53:50 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId768446743
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId768446743
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] Shutting down DataNode 0
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/28 22:53:50 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/28 22:53:50 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/28 22:53:51 INFO ipc.Server: Stopping server on 45399
    [junit] 12/09/28 22:53:51 INFO ipc.Server: IPC Server handler 0 on 45399: exiting
    [junit] 12/09/28 22:53:51 INFO ipc.Server: Stopping IPC Server listener on 45399
    [junit] 12/09/28 22:53:51 INFO ipc.Server: IPC Server handler 1 on 45399: exiting
    [junit] 12/09/28 22:53:51 INFO ipc.Server: IPC Server handler 2 on 45399: exiting
    [junit] 12/09/28 22:53:51 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:51 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/28 22:53:51 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/28 22:53:51 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:45914, storageID=DS-1202309860-67.195.138.24-45914-1348872082985, infoPort=49632, ipcPort=45399):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/28 22:53:51 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/28 22:53:51 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/28 22:53:52 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:52 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:45914, storageID=DS-1202309860-67.195.138.24-45914-1348872082985, infoPort=49632, ipcPort=45399):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/28 22:53:52 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/28 22:53:52 INFO ipc.Server: Stopping server on 45399
    [junit] 12/09/28 22:53:52 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:52 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/28 22:53:52 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/28 22:53:52 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/28 22:53:52 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1416524779
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1416524779
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/28 22:53:52 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/28 22:53:52 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/28 22:53:52 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/28 22:53:52 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/28 22:53:52 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 9 Number of transactions batched in Syncs: 133 Number of syncs: 431 SyncTimes(ms): 4239 926 
    [junit] 12/09/28 22:53:52 INFO ipc.Server: Stopping server on 51214
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 5 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/28 22:53:52 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/28 22:53:52 INFO ipc.Server: Stopping IPC Server listener on 51214
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 2 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 7 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 1 on 51214: exiting
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 737.674 sec
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 8 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 4 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 0 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 6 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 3 on 51214: exiting
    [junit] 12/09/28 22:53:52 INFO ipc.Server: IPC Server handler 9 on 51214: exiting
    [junit] Test org.apache.pig.test.TestStore FAILED
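The repeated util.MBeans warnings above come from MiniDFSCluster teardown in TestStore.oneTimeTearDown: each DataNode shutdown asks JMX to unregister an FSDatasetState MBean that is apparently no longer (or was never) registered, so the platform MBeanServer throws InstanceNotFoundException and Hadoop's MBeans.unregister() merely logs it. The AsynchronousCloseException from DataXceiverServer is the same kind of shutdown noise: accept() is interrupted when the server channel is closed. Because both are caught and logged during teardown, they are unlikely by themselves to account for the 4 failures and 3 errors reported for TestStore. A minimal, self-contained sketch of the JMX side (not taken from the Hadoop or Pig sources; the class name and ObjectName below are illustrative only):

    // Minimal sketch, not Hadoop/Pig source: the ObjectName and class name are made up.
    import java.lang.management.ManagementFactory;
    import javax.management.InstanceNotFoundException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class UnregisterDemo {
        public static void main(String[] args) throws Exception {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            // Nothing has been registered under this name in this JVM.
            ObjectName name =
                new ObjectName("Hadoop:service=DataNode,name=FSDatasetState-Example");
            try {
                mbs.unregisterMBean(name);
            } catch (InstanceNotFoundException e) {
                // Hadoop's MBeans.unregister() catches exactly this and logs it as the
                // WARN + stack trace pattern seen in the teardown output above.
                System.out.println("Expected during double/early unregister: " + e);
            }
        }
    }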
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/28 22:53:54 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/28 22:53:54 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/28 22:53:54 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/28 22:53:54 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/28 22:53:54 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/28 22:53:54 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/28 22:53:54 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.439 sec
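The SUBSTRING, INDEXOF and LAST_INDEX_OF warnings above are presumably expected output from TestStringUDFs exercising invalid inputs: the UDFs catch the underlying NullPointerException or StringIndexOutOfBoundsException and, with no Pig logger attached in this standalone JUnit context, fall back to the "No logger object provided to UDF" message, while the run itself passes (12 tests, 0 failures, 0 errors). A minimal sketch of where the "String index out of range" messages come from (illustrative only, not Pig code):

    // Minimal sketch, not Pig source: shows where "String index out of range: -2"
    // comes from when a substring end index is smaller than the begin index.
    public class SubstringRangeDemo {
        public static void main(String[] args) {
            try {
                "hello".substring(3, 1); // begin 3 > end 1
            } catch (StringIndexOutOfBoundsException e) {
                // On the JDK 6-era runtime used in this build the message reads
                // "String index out of range: -2" (end - begin); newer JDKs word it differently.
                System.out.println(e.getMessage());
            }
        }
    }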
   [delete] Deleting directory /tmp/pig_junit_tmp1715207785

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 29 minutes 21 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1324

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1324/changes>

Changes:

[jcoveney] Adding tests for PIG-2936 (jcoveney)

------------------------------------------
[...truncated 6614 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 47
 [findbugs] Missing classes: 284
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
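As the flag lines above indicate, the FindBugs wrapper encodes its status as a bitmask: 1 for "bugs found" (47 warnings generated) and 2 for "missing classes" (284 unresolved classes, mostly optional dependencies such as hbase, jruby and jython that are not on the analysis classpath), giving the reported exit code 3. A tiny sketch of that combination (illustrative, not FindBugs source):

    // Minimal sketch, not FindBugs source: how the two flags logged above combine
    // into "Exit code set to: 3".
    public class FindbugsExitCode {
        static final int BUGS_FOUND_FLAG = 1;    // "Setting 'bugs found' flag (1)"
        static final int MISSING_CLASS_FLAG = 2; // "Setting 'missing class' flag (2)"

        public static void main(String[] args) {
            boolean bugsFound = true;      // 47 warnings were generated
            boolean classesMissing = true; // 284 classes could not be resolved
            int exitCode = (bugsFound ? BUGS_FOUND_FLAG : 0)
                         | (classesMissing ? MISSING_CLASS_FLAG : 0);
            System.out.println("Exit code: " + exitCode); // prints 3
        }
    }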

BUILD SUCCESSFUL
Total time: 5 minutes 28 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
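The ANALYSIS step therefore fails before any tests run: the bundled Clover 3.1.0 (built May 31 2011) postdates the license's upgrade window, which ended December 14 2010. Until the license is renewed, one possible workaround (a sketch, not something the build scripts already provide) would be to invoke the same step without the Clover pieces, dropping -Drun.clover=true, -Dclover.home and the clover/generate-clover-reports targets, for example:

    ant test-commit -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Declipse.home=$ECLIPSE_HOME

Whether test-commit runs cleanly without the clover.setup target depends on the target dependencies in build.xml, so this is only an illustration.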
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1323

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1323/changes>

Changes:

[jcoveney] PIG-2936: Tuple serialization bug (jcoveney)

[billgraham] PIG-2929: Improve documentation around AVG, CONCAT, MIN, MAX (cheolsoo via billgraham)

------------------------------------------
[...truncated 36142 lines...]
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/27 23:23:06 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/09/27 23:23:06 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping server on 40341
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 1 on 40341: exiting
    [junit] 12/09/27 23:23:06 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 0 on 40341: exiting
    [junit] 12/09/27 23:23:06 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:51628, storageID=DS-383646298-67.195.138.24-51628-1348787453695, infoPort=36753, ipcPort=40341):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 2 on 40341: exiting
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping IPC Server listener on 40341
    [junit] 12/09/27 23:23:06 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:51628, storageID=DS-383646298-67.195.138.24-51628-1348787453695, infoPort=36753, ipcPort=40341):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping server on 40341
    [junit] 12/09/27 23:23:06 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/27 23:23:06 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/27 23:23:06 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/27 23:23:06 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-397625939
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-397625939
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/27 23:23:06 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/27 23:23:06 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping server on 54881
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 0 on 54881: exiting
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 2 on 54881: exiting
    [junit] 12/09/27 23:23:06 INFO ipc.Server: IPC Server handler 1 on 54881: exiting
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/27 23:23:06 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:06 INFO ipc.Server: Stopping IPC Server listener on 54881
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/27 23:23:06 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:42618, storageID=DS-1504483847-67.195.138.24-42618-1348787452776, infoPort=39223, ipcPort=54881):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/27 23:23:06 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/27 23:23:07 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Scheduling block blk_-271229790202754443_1134 file build/test/data/dfs/data/data2/current/blk_-271229790202754443 for deletion
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Scheduling block blk_446744463370653368_1134 file build/test/data/dfs/data/data1/current/blk_446744463370653368 for deletion
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Deleted block blk_-271229790202754443_1134 at file build/test/data/dfs/data/data2/current/blk_-271229790202754443
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Deleted block blk_446744463370653368_1134 at file build/test/data/dfs/data/data1/current/blk_446744463370653368
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:42618, storageID=DS-1504483847-67.195.138.24-42618-1348787452776, infoPort=39223, ipcPort=54881):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/09/27 23:23:07 INFO ipc.Server: Stopping server on 54881
    [junit] 12/09/27 23:23:07 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/27 23:23:07 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/27 23:23:07 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/27 23:23:07 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1392934500
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1392934500
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/27 23:23:07 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/09/27 23:23:07 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/27 23:23:08 INFO ipc.Server: Stopping server on 48094
    [junit] 12/09/27 23:23:08 INFO ipc.Server: IPC Server handler 0 on 48094: exiting
    [junit] 12/09/27 23:23:08 INFO ipc.Server: IPC Server handler 1 on 48094: exiting
    [junit] 12/09/27 23:23:08 INFO ipc.Server: IPC Server handler 2 on 48094: exiting
    [junit] 12/09/27 23:23:08 INFO ipc.Server: Stopping IPC Server listener on 48094
    [junit] 12/09/27 23:23:08 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/27 23:23:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/27 23:23:08 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:42009, storageID=DS-2129681787-67.195.138.24-42009-1348787451799, infoPort=47246, ipcPort=48094):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/27 23:23:08 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/27 23:23:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/27 23:23:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/27 23:23:09 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:42009, storageID=DS-2129681787-67.195.138.24-42009-1348787451799, infoPort=47246, ipcPort=48094):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/27 23:23:09 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/27 23:23:09 INFO ipc.Server: Stopping server on 48094
    [junit] 12/09/27 23:23:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/27 23:23:09 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/27 23:23:09 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/27 23:23:09 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId566847199
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId566847199
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/27 23:23:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/27 23:23:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/27 23:23:09 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/27 23:23:09 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 12 Number of transactions batched in Syncs: 132 Number of syncs: 432 SyncTimes(ms): 6008 398 
    [junit] 12/09/27 23:23:09 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/27 23:23:09 INFO ipc.Server: Stopping server on 59079
    [junit] 12/09/27 23:23:09 INFO ipc.Server: Stopping IPC Server listener on 59079
    [junit] 12/09/27 23:23:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 6 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 3 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 0 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 2 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 7 on 59079: exiting
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 725.827 sec
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 9 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 4 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 5 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 8 on 59079: exiting
    [junit] 12/09/27 23:23:09 INFO ipc.Server: IPC Server handler 1 on 59079: exiting
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/27 23:23:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/27 23:23:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/27 23:23:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/27 23:23:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/27 23:23:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/27 23:23:11 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/27 23:23:11 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.454 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1509676356

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:814: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:882: Tests failed!

Total time: 28 minutes 46 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1322

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1322/changes>

Changes:

[daijy] PIG-2930: ant test doesn't compile in trunk

------------------------------------------
[...truncated 6698 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 47
 [findbugs] Missing classes: 284
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
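For reference, the FindBugs exit code reported above is a simple bit mask: the 'bugs found' flag sets bit 1 and the 'missing class' flag sets bit 2, which OR together to 3. A rough Java rendering of that arithmetic is below; it is an assumption read off the log lines, not FindBugs source, and FindbugsExitCodeSketch is a hypothetical name.

    // Rough sketch of the exit-code calculation logged above.
    public class FindbugsExitCodeSketch {
        public static void main(String[] args) {
            boolean bugsFound = true;       // "Warnings generated: 47"
            boolean missingClasses = true;  // "Missing classes: 284"
            int exitCode = 0;
            if (missingClasses) exitCode |= 2; // "Setting 'missing class' flag (2)"
            if (bugsFound)      exitCode |= 1; // "Setting 'bugs found' flag (1)"
            System.out.println(exitCode);      // 3, matching "Exit code set to: 3"
        }
    }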

BUILD SUCCESSFUL
Total time: 5 minutes 53 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1321

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1321/changes>

Changes:

[daijy] PIG-2791: Pig does not work with ViewFileSystem

------------------------------------------
[...truncated 6897 lines...]
[ivy:resolve] 	found org.apache.avro#avro;1.5.3 in default
[ivy:resolve] 	found com.thoughtworks.paranamer#paranamer;2.3 in default
[ivy:resolve] 	found org.xerial.snappy#snappy-java;1.0.3.2 in default
[ivy:resolve] 	found org.slf4j#slf4j-api;1.6.1 in default
[ivy:resolve] 	found com.googlecode.json-simple#json-simple;1.1 in maven2
[ivy:resolve] 	found com.jcraft#jsch;0.1.38 in maven2
[ivy:resolve] 	found jline#jline;0.9.94 in default
[ivy:resolve] 	found net.java.dev.javacc#javacc;4.2 in maven2
[ivy:resolve] 	found org.codehaus.groovy#groovy-all;1.8.6 in maven2
[ivy:resolve] 	found org.codehaus.jackson#jackson-mapper-asl;1.8.8 in maven2
[ivy:resolve] 	found org.codehaus.jackson#jackson-core-asl;1.8.8 in maven2
[ivy:resolve] 	found org.fusesource.jansi#jansi;1.9 in maven2
[ivy:resolve] 	found joda-time#joda-time;1.6 in maven2
[ivy:resolve] 	found com.google.guava#guava;11.0 in maven2
[ivy:resolve] 	found org.python#jython-standalone;2.5.2 in maven2
[ivy:resolve] 	found rhino#js;1.7R2 in maven2
[ivy:resolve] 	found org.antlr#antlr;3.4 in maven2
[ivy:resolve] 	found org.antlr#antlr-runtime;3.4 in maven2
[ivy:resolve] 	found org.antlr#stringtemplate;3.2.1 in maven2
[ivy:resolve] 	found antlr#antlr;2.7.7 in maven2
[ivy:resolve] 	found org.antlr#ST4;4.0.4 in maven2
[ivy:resolve] 	found org.apache.zookeeper#zookeeper;3.3.3 in maven2
[ivy:resolve] 	found dk.brics.automaton#automaton;1.11-8 in maven2
[ivy:resolve] 	found org.jruby#jruby-complete;1.6.7 in maven2
[ivy:resolve] 	found org.apache.hbase#hbase;0.90.0 in maven2
[ivy:resolve] 	found org.vafer#jdeb;0.8 in maven2
[ivy:resolve] 	found org.mockito#mockito-all;1.8.4 in maven2
[ivy:resolve] 	found xalan#xalan;2.7.1 in maven2
[ivy:resolve] 	found xalan#serializer;2.7.1 in maven2
[ivy:resolve] 	found xml-apis#xml-apis;1.3.04 in maven2
[ivy:resolve] 	found xerces#xercesImpl;2.10.0 in maven2
[ivy:resolve] 	found xml-apis#xml-apis;1.4.01 in maven2
[ivy:resolve] 	found junit#junit;4.5 in maven2
[ivy:resolve] 	found org.apache.hive#hive-exec;0.8.0 in maven2
[ivy:resolve] 	found junit#junit;3.8.1 in maven2
[ivy:resolve] 	found com.google.code.p.arat#rat-lib;0.5.1 in maven2
[ivy:resolve] 	found commons-collections#commons-collections;3.2 in maven2
[ivy:resolve] 	found commons-lang#commons-lang;2.1 in maven2
[ivy:resolve] 	found jdiff#jdiff;1.0.9 in default
[ivy:resolve] 	found checkstyle#checkstyle;4.2 in maven2
[ivy:resolve] 	found commons-beanutils#commons-beanutils-core;1.7.0 in maven2
[ivy:resolve] 	found commons-cli#commons-cli;1.0 in maven2
[ivy:resolve] 	found commons-logging#commons-logging;1.0.3 in maven2
[ivy:resolve] 	found org.codehaus.jackson#jackson-mapper-asl;1.0.1 in maven2
[ivy:resolve] 	found org.codehaus.jackson#jackson-core-asl;1.0.1 in maven2
[ivy:resolve] 	found com.sun.jersey#jersey-bundle;1.8 in maven2
[ivy:resolve] 	found com.sun.jersey#jersey-server;1.8 in default
[ivy:resolve] 	found com.sun.jersey.contribs#jersey-guice;1.8 in maven2
[ivy:resolve] 	found commons-httpclient#commons-httpclient;3.1 in fs
[ivy:resolve] 	found javax.servlet#servlet-api;2.5 in fs
[ivy:resolve] 	found javax.ws.rs#jsr311-api;1.1.1 in maven2
[ivy:resolve] 	found com.google.protobuf#protobuf-java;2.4.0a in fs
[ivy:resolve] 	found javax.inject#javax.inject;1 in maven2
[ivy:resolve] 	found javax.xml.bind#jaxb-api;2.2.2 in fs
[ivy:resolve] 	found com.sun.xml.bind#jaxb-impl;2.2.3-1 in fs
[ivy:resolve] 	found com.google.inject#guice;3.0 in maven2
[ivy:resolve] 	found com.google.inject.extensions#guice-servlet;3.0 in maven2
[ivy:resolve] 	found aopalliance#aopalliance;1.0 in fs
[ivy:resolve] 	found org.apache.hadoop#hadoop-annotations;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-auth;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-common;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-hdfs;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-core;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-jobclient;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server-tests;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-app;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-shuffle;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-common;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-api;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-common;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server-web-proxy;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server-common;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server-nodemanager;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-yarn-server-resourcemanager;2.0.0-alpha in maven2
[ivy:resolve] 	found org.apache.hadoop#hadoop-mapreduce-client-hs;2.0.0-alpha in maven2
[ivy:resolve] :: resolution report :: resolve 4218ms :: artifacts dl 88ms
[ivy:resolve] 	:: evicted modules:
[ivy:resolve] 	junit#junit;3.8.1 by [junit#junit;4.5] in [default, test, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-logging#commons-logging;1.0.3 by [commons-logging#commons-logging;1.1.1] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-codec#commons-codec;1.2 by [commons-codec#commons-codec;1.4] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-logging#commons-logging;1.1 by [commons-logging#commons-logging;1.1.1] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-codec#commons-codec;1.3 by [commons-codec#commons-codec;1.4] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-httpclient#commons-httpclient;3.1 by [commons-httpclient#commons-httpclient;3.0.1] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.codehaus.jackson#jackson-mapper-asl;1.0.1 by [org.codehaus.jackson#jackson-mapper-asl;1.8.8] in [default, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.slf4j#slf4j-api;1.5.2 by [org.slf4j#slf4j-api;1.6.1] in [default, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.apache.mina#mina-core;2.0.0-M4 by [org.apache.mina#mina-core;2.0.0-M5] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.apache.ftpserver#ftplet-api;1.0.0-M2 by [org.apache.ftpserver#ftplet-api;1.0.0] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.apache.ftpserver#ftpserver-core;1.0.0-M2 by [org.apache.ftpserver#ftpserver-core;1.0.0] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.apache.mina#mina-core;2.0.0-M2 by [org.apache.mina#mina-core;2.0.0-M5] in [default, hadoop20, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	commons-cli#commons-cli;1.0 by [commons-cli#commons-cli;1.2] in [default, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.codehaus.jackson#jackson-mapper-asl;1.7.3 by [org.codehaus.jackson#jackson-mapper-asl;1.8.8] in [default, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	org.antlr#antlr-runtime;3.3 by [org.antlr#antlr-runtime;3.4] in [default, test, compile, runtime, javadoc, buildJar]
[ivy:resolve] 	xml-apis#xml-apis;1.3.04 by [xml-apis#xml-apis;1.4.01] in [default, test, runtime, javadoc, buildJar]
	---------------------------------------------------------------------
	|                  |            modules            ||   artifacts   |
	|       conf       | number| search|dwnlded|evicted|| number|dwnlded|
	---------------------------------------------------------------------
	|      master      |   0   |   0   |   0   |   0   ||   0   |   0   |
	|      default     |   89  |   0   |   0   |   16  ||   74  |   0   |
	|      runtime     |   89  |   0   |   0   |   16  ||   74  |   0   |
	|      compile     |   82  |   0   |   0   |   14  ||   69  |   0   |
	|       test       |   89  |   0   |   0   |   16  ||   74  |   0   |
	|      javadoc     |   89  |   0   |   0   |   16  ||   74  |   0   |
	|   releaseaudit   |   3   |   0   |   0   |   0   ||   3   |   0   |
	|       jdiff      |   3   |   0   |   0   |   0   ||   3   |   0   |
	|    checkstyle    |   6   |   0   |   0   |   0   ||   6   |   0   |
	|     buildJar     |   89  |   0   |   0   |   16  ||   74  |   0   |
	|     hadoop20     |   48  |   0   |   0   |   9   ||   39  |   0   |
	|     hadoop23     |   39  |   0   |   0   |   0   ||   41  |   0   |
	---------------------------------------------------------------------

ivy-compile:
[ivy:retrieve] :: retrieving :: org.apache.pig#pig
[ivy:retrieve] 	confs: [compile]
[ivy:retrieve] 	69 artifacts copied, 0 already retrieved (62857kB/240ms)
[ivy:cachepath] DEPRECATED: 'ivy.conf.file' is deprecated, use 'ivy.settings.file' instead
[ivy:cachepath] :: loading settings :: file = <https://builds.apache.org/job/Pig-trunk/ws/trunk/ivy/ivysettings.xml>

init:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/impl/logicalLayer/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/tools/pigscript/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/tools/parameters>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/classes>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/data/parser>
     [move] Moving 1 file to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/ivy/lib/Pig>

cc-compile:
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/pigscript/parser/PigScriptParser.jj> . . .
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "JavaCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated successfully.
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/parameters/PigFileParser.jj> . . .
   [javacc] Warning: Lookahead adequacy checking not being performed since option LOOKAHEAD is more than 1.  Set option FORCE_LA_CHECK to true to force checking.
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "JavaCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated with 0 errors and 1 warnings.
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/org/apache/pig/tools/parameters/ParamLoader.jj> . . .
   [javacc] File "TokenMgrError.java" is being rebuilt.
   [javacc] File "ParseException.java" is being rebuilt.
   [javacc] File "Token.java" is being rebuilt.
   [javacc] File "JavaCharStream.java" is being rebuilt.
   [javacc] Parser generated successfully.
   [jjtree] Java Compiler Compiler Version 4.2 (Tree Builder)
   [jjtree] (type "jjtree" with no arguments for help)
   [jjtree] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/DOTParser.jjt> . . .
   [jjtree] File "Node.java" does not exist.  Will create one.
   [jjtree] File "SimpleNode.java" does not exist.  Will create one.
   [jjtree] File "DOTParserTreeConstants.java" does not exist.  Will create one.
   [jjtree] File "JJTDOTParserState.java" does not exist.  Will create one.
   [jjtree] Annotated grammar generated successfully in <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser/DOTParser.jj>
   [javacc] Java Compiler Compiler Version 4.2 (Parser Generator)
   [javacc] (type "javacc" with no arguments for help)
   [javacc] Reading from file <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser/DOTParser.jj> . . .
   [javacc] File "TokenMgrError.java" does not exist.  Will create one.
   [javacc] File "ParseException.java" does not exist.  Will create one.
   [javacc] File "Token.java" does not exist.  Will create one.
   [javacc] File "SimpleCharStream.java" does not exist.  Will create one.
   [javacc] Parser generated successfully.

prepare:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen/org/apache/pig/parser>

genLexer:

genParser:

genTreeParser:

gen:

compile:
     [echo] *** Building Main Sources ***
     [echo] *** To compile with all warnings enabled, supply -Dall.warnings=1 on command line ***
     [echo] *** If all.warnings property is supplied, compile-sources-all-warnings target will be executed ***
     [echo] *** Else, compile-sources (which only warns about deprecations) target will be executed ***

compile-sources:
    [javac] Compiling 786 source files to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes>
   [clover] Clover Version 2.4.3, built on March 09 2009 (build-756)
   [clover] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar
   [clover] Clover: Open Source License registered to Apache.
   [clover] Creating new database at '<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db/pig_coverage.db>'.
   [clover] Processing files at 1.5 source level.
   [clover] Clover all over. Instrumented 753 files (58 packages).
   [clover] Elapsed time = 5.922 secs. (127.153 files/sec, 25,845.998 srclines/sec)
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] Note: Some input files use unchecked or unsafe operations.
    [javac] Note: Recompile with -Xlint:unchecked for details.
     [copy] Copying 1 file to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes/org/apache/pig/tools/grunt>
     [copy] Copying 1 file to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/classes/org/apache/pig/tools/grunt>

compile-sources-all-warnings:

ivy-test:
[ivy:retrieve] :: retrieving :: org.apache.pig#pig
[ivy:retrieve] 	confs: [test]
[ivy:retrieve] 	7 artifacts copied, 67 already retrieved (8775kB/40ms)

compile-test:
     [echo] *** Building Test Sources ***
     [echo] *** To compile with all warnings enabled, supply -Dall.warnings=1 on command line ***
     [echo] *** If all.warnings property is supplied, compile-sources-all-warnings target will be executed ***
     [echo] *** Else, compile-sources (which only warns about deprecations) target will be executed ***

compile-sources:
    [javac] Compiling 348 source files to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/classes>
   [clover] Clover Version 2.4.3, built on March 09 2009 (build-756)
   [clover] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar
   [clover] Clover: Open Source License registered to Apache.
   [clover] Updating existing database at '<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db/pig_coverage.db>'.
   [clover] Processing files at 1.5 source level.
   [clover] Clover all over. Instrumented 0 files (0 packages).
   [clover] Elapsed time = 1.935 secs. (0 files/sec, 0 srclines/sec)
    [javac] <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/TestBuiltin.java>:195: cannot find symbol
    [javac] symbol  : class Properties
    [javac] location: class org.apache.pig.test.TestBuiltin
    [javac]         pigServer = new PigServer(ExecType.LOCAL, new Properties());
    [javac]                                                       ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] Note: Some input files use unchecked or unsafe operations.
    [javac] Note: Recompile with -Xlint:unchecked for details.
    [javac] 1 error

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:471: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:503: Compile failed; see the compiler error output for details.
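The compile failure above looks like a missing import: TestBuiltin.java:195 references Properties without java.util.Properties being imported. PIG-2930 ("ant test doesn't compile in trunk"), picked up by build #1322 earlier in this digest, presumably addresses it. A minimal sketch of the failing call with the import in place follows; TestBuiltinImportSketch is a hypothetical stand-in, not the actual test class.

    import java.util.Properties;

    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    // Once java.util.Properties is imported, the line flagged by javac compiles.
    public class TestBuiltinImportSketch {
        public static void main(String[] args) throws Exception {
            PigServer pigServer = new PigServer(ExecType.LOCAL, new Properties());
            System.out.println("PigServer created in " + ExecType.LOCAL + " mode");
        }
    }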

Total time: 43 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1320

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1320/changes>

Changes:

[jcoveney] PIG-2833: org.apache.pig.pigunit.pig.PigServer does not initialize set default log level of pigContext (cheolsoo via jcoveney)

[jcoveney] PIG-2852: Update documentation regarding parallel local mode execution (cheolsoo via jcoveney)

------------------------------------------
[...truncated 6615 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 47
 [findbugs] Missing classes: 284
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 28 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1319

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1319/changes>

Changes:

[gdfm] PIG-2879: Pig current releases lack a UDF startsWith. This UDF tests if a given string starts with the specified prefix. (initialcontext via azaroth)
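For context on the change above, a startsWith-style UDF can be written as a small EvalFunc wrapping java.lang.String.startsWith. The sketch below is illustrative only; StartsWithSketch is a hypothetical name and is not the implementation committed under PIG-2879.

    import java.io.IOException;

    import org.apache.pig.EvalFunc;
    import org.apache.pig.data.Tuple;

    // Illustrative only: returns true when the first argument starts with the second.
    public class StartsWithSketch extends EvalFunc<Boolean> {
        @Override
        public Boolean exec(Tuple input) throws IOException {
            if (input == null || input.size() != 2) {
                return null;
            }
            String str = (String) input.get(0);
            String prefix = (String) input.get(1);
            if (str == null || prefix == null) {
                return null;
            }
            return str.startsWith(prefix);
        }
    }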

------------------------------------------
[...truncated 36347 lines...]
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 2
    [junit] 12/09/21 11:48:28 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/21 11:48:28 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120921113616505_0012
    [junit] 12/09/21 11:48:28 WARN mapred.TaskTracker: Unknown job job_20120921113616505_0012 being deleted.
    [junit] 12/09/21 11:48:28 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/21 11:48:28 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:54254 to delete  blk_28853914184746033_1127
    [junit] 12/09/21 11:48:28 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:37651 to delete  blk_3376182645604832989_1133 blk_9013264296899369525_1134 blk_-3388118641670678486_1134
    [junit] 12/09/21 11:48:28 INFO ipc.Server: Stopping server on 37868
    [junit] 12/09/21 11:48:28 INFO ipc.Server: IPC Server handler 1 on 37868: exiting
    [junit] 12/09/21 11:48:28 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:28 INFO ipc.Server: IPC Server handler 0 on 37868: exiting
    [junit] 12/09/21 11:48:28 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/21 11:48:28 INFO ipc.Server: Stopping IPC Server listener on 37868
    [junit] 12/09/21 11:48:28 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/21 11:48:28 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60322, storageID=DS-318392138-67.195.138.24-60322-1348227375104, infoPort=57327, ipcPort=37868):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/21 11:48:28 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/21 11:48:28 INFO ipc.Server: IPC Server handler 2 on 37868: exiting
    [junit] 12/09/21 11:48:28 INFO datanode.DataNode: Scheduling block blk_28853914184746033_1127 file build/test/data/dfs/data/data2/current/blk_28853914184746033 for deletion
    [junit] 12/09/21 11:48:28 INFO datanode.DataNode: Deleted block blk_28853914184746033_1127 at file build/test/data/dfs/data/data2/current/blk_28853914184746033
    [junit] 12/09/21 11:48:29 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Scheduling block blk_-3388118641670678486_1134 file build/test/data/dfs/data/data4/current/blk_-3388118641670678486 for deletion
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Scheduling block blk_3376182645604832989_1133 file build/test/data/dfs/data/data3/current/blk_3376182645604832989 for deletion
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Scheduling block blk_9013264296899369525_1134 file build/test/data/dfs/data/data3/current/blk_9013264296899369525 for deletion
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Deleted block blk_3376182645604832989_1133 at file build/test/data/dfs/data/data3/current/blk_3376182645604832989
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Deleted block blk_-3388118641670678486_1134 at file build/test/data/dfs/data/data4/current/blk_-3388118641670678486
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Deleted block blk_9013264296899369525_1134 at file build/test/data/dfs/data/data3/current/blk_9013264296899369525
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60322, storageID=DS-318392138-67.195.138.24-60322-1348227375104, infoPort=57327, ipcPort=37868):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current>'}
    [junit] 12/09/21 11:48:29 INFO ipc.Server: Stopping server on 37868
    [junit] 12/09/21 11:48:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/21 11:48:29 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/21 11:48:29 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/21 11:48:29 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId991145128
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId991145128
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/21 11:48:29 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/09/21 11:48:29 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/21 11:48:29 INFO ipc.Server: Stopping server on 43733
    [junit] 12/09/21 11:48:29 INFO ipc.Server: IPC Server handler 0 on 43733: exiting
    [junit] 12/09/21 11:48:29 INFO ipc.Server: IPC Server handler 2 on 43733: exiting
    [junit] 12/09/21 11:48:29 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/21 11:48:29 INFO ipc.Server: IPC Server handler 1 on 43733: exiting
    [junit] 12/09/21 11:48:29 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:29 INFO ipc.Server: Stopping IPC Server listener on 43733
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/21 11:48:29 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:37651, storageID=DS-255533021-67.195.138.24-37651-1348227374125, infoPort=39550, ipcPort=43733):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/21 11:48:29 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/21 11:48:30 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:37651, storageID=DS-255533021-67.195.138.24-37651-1348227374125, infoPort=39550, ipcPort=43733):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current>'}
    [junit] 12/09/21 11:48:30 INFO ipc.Server: Stopping server on 43733
    [junit] 12/09/21 11:48:30 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/21 11:48:30 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/21 11:48:30 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/21 11:48:30 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId659572024
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId659572024
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/21 11:48:30 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/09/21 11:48:30 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/21 11:48:30 INFO ipc.Server: Stopping server on 58995
    [junit] 12/09/21 11:48:30 INFO ipc.Server: Stopping IPC Server listener on 58995
    [junit] 12/09/21 11:48:30 INFO ipc.Server: IPC Server handler 1 on 58995: exiting
    [junit] 12/09/21 11:48:30 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:30 INFO ipc.Server: IPC Server handler 2 on 58995: exiting
    [junit] 12/09/21 11:48:30 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:54254, storageID=DS-898318503-67.195.138.24-54254-1348227373167, infoPort=49801, ipcPort=58995):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/21 11:48:30 INFO ipc.Server: IPC Server handler 0 on 58995: exiting
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/21 11:48:30 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/21 11:48:30 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:54254, storageID=DS-898318503-67.195.138.24-54254-1348227373167, infoPort=49801, ipcPort=58995):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/21 11:48:30 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/21 11:48:30 INFO ipc.Server: Stopping server on 58995
    [junit] 12/09/21 11:48:30 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:30 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/21 11:48:30 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/21 11:48:30 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/21 11:48:30 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1643577954
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1643577954
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:141)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/21 11:48:30 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/21 11:48:30 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/21 11:48:31 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/21 11:48:31 INFO namenode.FSNamesystem: Number of transactions: 629 Total time for transactions(ms): 4Number of transactions batched in Syncs: 126 Number of syncs: 433 SyncTimes(ms): 6883 281 
    [junit] 12/09/21 11:48:31 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/21 11:48:31 INFO ipc.Server: Stopping server on 41722
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 0 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 2 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 1 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 3 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 4 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 5 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 6 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 7 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 8 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: IPC Server handler 9 on 41722: exiting
    [junit] 12/09/21 11:48:31 INFO ipc.Server: Stopping IPC Server listener on 41722
    [junit] 12/09/21 11:48:31 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/21 11:48:31 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 4, Errors: 3, Time elapsed: 726.212 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
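The InstanceNotFoundException stack traces above are emitted while TestStore's oneTimeTearDown shuts down the shared mini cluster: MiniDFSCluster.shutdown() stops each DataNode, which then tries to unregister JMX MBeans that are already gone. Those JMX warnings are a side effect of teardown, not the source of the 4 failures and 3 errors reported for TestStore. A minimal sketch of that teardown path, with assumed setup details where they are not visible in the trace (in particular the MiniDFSCluster constructor arguments), looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniDfsTeardownSketch {
        private static MiniDFSCluster dfsCluster;

        @BeforeClass
        public static void oneTimeSetUp() throws Exception {
            // Assumed Hadoop 1.x-style constructor: 2 datanodes, format on startup.
            dfsCluster = new MiniDFSCluster(new Configuration(), 2, true, null);
        }

        @AfterClass
        public static void oneTimeTearDown() throws Exception {
            if (dfsCluster != null) {
                // DataNode/FSDataset shutdown unregisters MBeans here, which is
                // where the InstanceNotFoundException warnings above come from.
                dfsCluster.shutdown();
            }
        }
    }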
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/21 11:48:33 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/21 11:48:33 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/21 11:48:33 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/21 11:48:33 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/21 11:48:33 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/21 11:48:33 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/21 11:48:33 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 12, Failures: 0, Errors: 0, Time elapsed: 0.504 sec
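The "No logger object provided to UDF" lines from TestStringUDFs come from EvalFunc.warn(): when a UDF is exercised directly in a unit test there is no PigLogger attached, so the reported exception (NullPointerException or StringIndexOutOfBoundsException) falls back to the plain commons-logging logger. A hedged sketch of a UDF using that reporting path — illustrative only, not the source of org.apache.pig.builtin.SUBSTRING, and the PigWarning constant chosen here is an assumption:

    import java.io.IOException;
    import org.apache.pig.EvalFunc;
    import org.apache.pig.PigWarning;
    import org.apache.pig.data.Tuple;

    public class SubstringLikeUdf extends EvalFunc<String> {
        @Override
        public String exec(Tuple input) throws IOException {
            if (input == null || input.size() < 3) {
                return null;
            }
            try {
                String s = (String) input.get(0);
                int begin = (Integer) input.get(1);
                int end = (Integer) input.get(2);
                return s.substring(begin, end);
            } catch (Exception e) {
                // Routed to the PigLogger inside a Pig job; in a bare JUnit run
                // this is what prints "No logger object provided to UDF: ...".
                warn(e.toString(), PigWarning.UDF_WARNING_1);
                return null;
            }
        }
    }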
   [delete] Deleting directory /tmp/pig_junit_tmp233463666

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:810: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:878: Tests failed!

Total time: 29 minutes 3 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1318

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1318/changes>

Changes:

[gates] PIG-2712 Pig does not call OutputCommitter.abortJob() on the underlying OutputFormat

------------------------------------------
[...truncated 6936 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 46
 [findbugs] Missing classes: 284
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
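The exit-code bookkeeping above is plain bit flags: 'missing class' contributes 2 and 'bugs found' contributes 1, so the reported "Exit code set to: 3" is their OR. A tiny illustrative sketch of that arithmetic (constant names assumed, not taken from the FindBugs sources):

    public class FindbugsExitCodeSketch {
        // Assumed names; the values match the flags printed in the log above.
        static final int BUGS_FOUND_FLAG = 1;
        static final int MISSING_CLASS_FLAG = 2;

        public static void main(String[] args) {
            int warningsGenerated = 46;   // "Warnings generated: 46"
            int missingClasses = 284;     // "Missing classes: 284"

            int exitCode = 0;
            if (missingClasses > 0) {
                exitCode |= MISSING_CLASS_FLAG; // "Setting 'missing class' flag (2)"
            }
            if (warningsGenerated > 0) {
                exitCode |= BUGS_FOUND_FLAG;    // "Setting 'bugs found' flag (1)"
            }
            System.out.println("Exit code set to: " + exitCode); // prints 3
        }
    }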
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 6 minutes 34 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1317

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1317/changes>

Changes:

[dvryaboy] PIG-2918: Avoid Spillable bag overhead where possible

------------------------------------------
[...truncated 6606 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 46
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 24 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1316

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1316/changes>

Changes:

[daijy] PIG-2744: Handle Pig command line with XML special characters

[dvryaboy] PIG-2900: Streaming should provide conf settings in the environment

[gates] PIG-2909 Add a new option for ignoring corrupted files to AvroStorage load func

[daijy] PIG-2637: Command-line option -e throws TokenMgrError exception

[gdfm] PIG-2353: RANK function like in SQL (xalan via azaroth)

[gates] PIG-2887 Macro cannot handle negative number

------------------------------------------
[...truncated 6900 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 46
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 6 minutes 19 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1315

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1315/changes>

Changes:

[dvryaboy] PIG-2915: Builtin TOP udf is sensitive to null input bags

[julien] PIG-2844: ant makepom is misconfigured (julien)

------------------------------------------
[...truncated 6519 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 42
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
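
The exit-code lines above show how the FindBugs Ant task combines its status flags: bugs were found (flag 1) and classes needed for analysis were missing (flag 2), so the reported exit code is 3, the bitwise OR of the two. A minimal sketch of that flag arithmetic in Java; the constant and method names here are hypothetical, only the values 1, 2 and 3 come from the log:

// Minimal sketch of the flag arithmetic reported in the [findbugs] output above.
// Constant names are invented for illustration; only the values come from the log.
public class FindbugsExitCodeSketch {
    static final int BUGS_FOUND_FLAG = 1;     // "Setting 'bugs found' flag (1)"
    static final int MISSING_CLASS_FLAG = 2;  // "Setting 'missing class' flag (2)"

    static int exitCode(boolean bugsFound, boolean missingClasses) {
        int code = 0;
        if (bugsFound) {
            code |= BUGS_FOUND_FLAG;
        }
        if (missingClasses) {
            code |= MISSING_CLASS_FLAG;
        }
        return code;
    }

    public static void main(String[] args) {
        // 42 warnings and 282 missing classes -> 1 | 2 = 3, matching "Exit code set to: 3".
        System.out.println(exitCode(true, true));
    }
}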

BUILD SUCCESSFUL
Total time: 5 minutes 16 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)
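
The BUILD FAILED above comes from Clover's license check: the license's upgrade entitlement ended December 14 2010, while the bundled Clover 3.1.0 jar was built May 31 2011, so CloverStartup.loadLicense rejects it before any instrumentation runs. A rough, hypothetical illustration of that kind of maintenance-expiry check in Java (not Clover's actual implementation; class and variable names are invented, only the two dates come from the log):

import java.time.LocalDate;

// Hypothetical illustration of a maintenance-expiry license check like the one failing above.
public class LicenseCheckSketch {
    public static void main(String[] args) {
        LocalDate upgradesEnd = LocalDate.of(2010, 12, 14); // end of the license's upgrade period
        LocalDate productBuilt = LocalDate.of(2011, 5, 31); // build date of Clover 3.1.0

        if (productBuilt.isAfter(upgradesEnd)) {
            // Mirrors the RuntimeException thrown by CloverStartup.loadLicense above.
            throw new RuntimeException("Clover upgrades for your license ended "
                    + upgradesEnd + ", and this version of Clover was built "
                    + productBuilt + ".");
        }
    }
}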

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1314

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1314/changes>

Changes:

[daijy] PIG-2912: Pig should clone JobConf while creating JobContextImpl and TaskAttemptContextImpl in Hadoop23

------------------------------------------
[...truncated 6526 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 42
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 30 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1313

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1313/changes>

Changes:

[billgraham] PIG-2901: Errors and lacks in document Pig Latin Basics (miyakawataku via billgraham)

------------------------------------------
[...truncated 6520 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 42
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 16 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1312

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1312/changes>

Changes:

[gates] PIG-1891 Enable StoreFunc to make intelligent decision based on job success or failure

------------------------------------------
[...truncated 36956 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/08 10:32:57 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/09/08 10:32:57 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/08 10:32:57 INFO ipc.Server: Stopping server on 36558
    [junit] 12/09/08 10:32:57 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:57 INFO ipc.Server: IPC Server handler 0 on 36558: exiting
    [junit] 12/09/08 10:32:57 INFO ipc.Server: Stopping IPC Server listener on 36558
    [junit] 12/09/08 10:32:57 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/08 10:32:57 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/08 10:32:57 INFO ipc.Server: IPC Server handler 1 on 36558: exiting
    [junit] 12/09/08 10:32:57 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46722, storageID=DS-2084239863-67.195.138.20-46722-1347099893439, infoPort=39736, ipcPort=36558):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/08 10:32:57 INFO ipc.Server: IPC Server handler 2 on 36558: exiting
    [junit] 12/09/08 10:32:57 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/08 10:32:57 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:46722 to delete  blk_1844885325031836069_1095 blk_6508533150285859957_1102 blk_-8835708142274856728_1101 blk_-2237968423149197077_1102
    [junit] 12/09/08 10:32:57 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:47479 to delete  blk_1844885325031836069_1095 blk_6508533150285859957_1102 blk_-8835708142274856728_1101 blk_-2237968423149197077_1102
    [junit] 12/09/08 10:32:57 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/08 10:32:58 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/08 10:32:58 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46722, storageID=DS-2084239863-67.195.138.20-46722-1347099893439, infoPort=39736, ipcPort=36558):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/09/08 10:32:58 INFO ipc.Server: Stopping server on 36558
    [junit] 12/09/08 10:32:58 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:58 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/08 10:32:58 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/08 10:32:58 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/08 10:32:58 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-394166431
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-394166431
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/08 10:32:58 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/08 10:32:58 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/08 10:32:58 INFO ipc.Server: Stopping server on 60980
    [junit] 12/09/08 10:32:58 INFO ipc.Server: IPC Server handler 1 on 60980: exiting
    [junit] 12/09/08 10:32:58 INFO ipc.Server: IPC Server handler 0 on 60980: exiting
    [junit] 12/09/08 10:32:58 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/08 10:32:58 INFO ipc.Server: Stopping IPC Server listener on 60980
    [junit] 12/09/08 10:32:58 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:58 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/08 10:32:58 INFO ipc.Server: IPC Server handler 2 on 60980: exiting
    [junit] 12/09/08 10:32:58 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:47479, storageID=DS-1440979677-67.195.138.20-47479-1347099893042, infoPort=48220, ipcPort=60980):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/08 10:32:58 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/08 10:32:59 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:47479, storageID=DS-1440979677-67.195.138.20-47479-1347099893042, infoPort=48220, ipcPort=60980):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping server on 60980
    [junit] 12/09/08 10:32:59 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/08 10:32:59 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/08 10:32:59 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/08 10:32:59 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1526988081
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1526988081
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/08 10:32:59 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/08 10:32:59 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping server on 57506
    [junit] 12/09/08 10:32:59 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 1 on 57506: exiting
    [junit] 12/09/08 10:32:59 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:45078, storageID=DS-567189157-67.195.138.20-45078-1347099892618, infoPort=37698, ipcPort=57506):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping IPC Server listener on 57506
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 0 on 57506: exiting
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 2 on 57506: exiting
    [junit] 12/09/08 10:32:59 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:45078, storageID=DS-567189157-67.195.138.20-45078-1347099892618, infoPort=37698, ipcPort=57506):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/08 10:32:59 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping server on 57506
    [junit] 12/09/08 10:32:59 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:59 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/08 10:32:59 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/08 10:32:59 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/08 10:32:59 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1599585202
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1599585202
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/08 10:32:59 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/08 10:32:59 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/08 10:32:59 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/08 10:32:59 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/08 10:32:59 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 8 Number of transactions batched in Syncs: 156 Number of syncs: 348 SyncTimes(ms): 4210 201 
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping server on 37247
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 0 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 1 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 7 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 5 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 9 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 4 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 3 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 2 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: Stopping IPC Server listener on 37247
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 6 on 37247: exiting
    [junit] 12/09/08 10:32:59 INFO ipc.Server: IPC Server handler 8 on 37247: exiting
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 480.252 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/08 10:33:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/08 10:33:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/08 10:33:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/08 10:33:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/08 10:33:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/08 10:33:00 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/08 10:33:00 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.238 sec
   [delete] Deleting directory /tmp/pig_junit_tmp777364487

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:810: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:878: Tests failed!

Total time: 19 minutes 56 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1311

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1311/changes>

Changes:

[billgraham] PIG-2905: Improve documentation around REPLACE (cheolsoo via billgraham)

------------------------------------------
[...truncated 6860 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 42
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
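
For reference, the exit-code bookkeeping above is simple flag arithmetic: FindBugs generated warnings (flag 1) and some classes needed for analysis were missing (flag 2), and the two flags are combined into the final "Java Result: 3". A small worked example of that combination; the constant names are hypothetical, only the values 1, 2 and the resulting 3 come from the log:

    // Reconstruction of the exit-code computation logged by the findbugs step.
    public class FindbugsExitCodeSketch {
        static final int BUGS_FOUND_FLAG = 1;    // "Setting 'bugs found' flag (1)"
        static final int MISSING_CLASS_FLAG = 2; // "Setting 'missing class' flag (2)"

        public static void main(String[] args) {
            boolean warningsGenerated = true; // "Warnings generated: 42"
            boolean classesMissing = true;    // "Missing classes: 282"

            int exitCode = 0;
            if (classesMissing)    exitCode |= MISSING_CLASS_FLAG;
            if (warningsGenerated) exitCode |= BUGS_FOUND_FLAG;

            System.out.println("Exit code set to: " + exitCode); // prints 3
        }
    }

Note that the non-zero findbugs result does not fail this ant invocation: the report is still written and the step ends with BUILD SUCCESSFUL below.
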

BUILD SUCCESSFUL
Total time: 6 minutes 31 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
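
The ANALYSIS failure above is a licensing problem rather than a code problem: the Clover license on the build slave only covers versions released up to December 14 2010, while the installed clover.jar was built May 31 2011, so CloverStartup.loadLicense throws before any instrumentation or tests run (hence "Total time: 0 seconds"). A minimal sketch of the date check the error message implies; the class and method below are illustrative, not Clover's real API:

    import java.time.LocalDate;

    // Illustrative license check implied by the RuntimeException message above.
    public class CloverLicenseCheckSketch {

        static void loadLicense(LocalDate upgradesEndDate, LocalDate cloverBuildDate) {
            if (cloverBuildDate.isAfter(upgradesEndDate)) {
                throw new RuntimeException("Clover upgrades for your license ended "
                        + upgradesEndDate + ", and this version of Clover was built "
                        + cloverBuildDate + ". Please visit http://www.atlassian.com/clover/renew"
                        + " for information on upgrading your license.");
            }
        }

        public static void main(String[] args) {
            try {
                // Dates taken from the BUILD FAILED message above.
                loadLicense(LocalDate.of(2010, 12, 14), LocalDate.of(2011, 5, 31));
            } catch (RuntimeException e) {
                System.out.println("BUILD FAILED: " + e.getMessage());
            }
        }
    }
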
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1310

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1310/changes>

Changes:

[dvryaboy] PIG-2882: Use Deque instead of Stack

[dvryaboy] PIG-2781: LOSort isEqual method

[dvryaboy] PIG-2835: Optimizing the conversion from bytes to Integer/Long

------------------------------------------
[...truncated 37253 lines...]
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/03 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-642507891
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-642507891
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] Shutting down DataNode 2
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
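
The javax.management.InstanceNotFoundException traces above (repeated once per DataNode during TestStore.oneTimeTearDown) come from a defensive unregister in Hadoop's MBeans.unregister: the MBean is already gone by the time MiniDFSCluster shuts the DataNode down, so the exception is logged as a WARN and shutdown continues. A self-contained sketch of that JMX pattern; the ObjectName below is illustrative:

    import java.lang.management.ManagementFactory;
    import javax.management.InstanceNotFoundException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    // Sketch of the "unregister, warn on InstanceNotFoundException" pattern
    // behind the util.MBeans WARN lines above.
    public class SafeMBeanUnregisterSketch {

        static void unregister(ObjectName name) {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            try {
                server.unregisterMBean(name);
            } catch (InstanceNotFoundException e) {
                // Bean was never registered or already removed: warn and keep
                // shutting down instead of failing the teardown.
                System.err.println("WARN util.MBeans: " + name + " " + e);
            } catch (Exception e) {
                throw new RuntimeException("Unexpected error unregistering " + name, e);
            }
        }

        public static void main(String[] args) throws Exception {
            unregister(new ObjectName("Hadoop:service=DataNode,name=FSDatasetState-Example"));
        }
    }
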
    [junit] 12/09/03 10:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/03 10:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 51549
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 1 on 51549: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 2 on 51549: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 0 on 51549: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 51549
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:42916, storageID=DS-1173411832-67.195.138.20-42916-1346667868489, infoPort=37378, ipcPort=51549):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Exiting DataXceiveServer
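
The AsynchronousCloseException warning just above is another piece of expected shutdown noise: DataXceiverServer blocks in ServerSocketChannel.accept(), and when the shutdown path closes that channel from a different thread, the blocked accept() wakes up with AsynchronousCloseException, which the DataNode logs before "Exiting DataXceiveServer". A small, self-contained demonstration of that standard NIO behaviour:

    import java.net.InetSocketAddress;
    import java.nio.channels.AsynchronousCloseException;
    import java.nio.channels.ServerSocketChannel;

    // Demonstrates why closing a listening channel during shutdown surfaces as
    // AsynchronousCloseException in the thread blocked in accept().
    public class AsyncCloseDemo {
        public static void main(String[] args) throws Exception {
            ServerSocketChannel server = ServerSocketChannel.open();
            server.bind(new InetSocketAddress("localhost", 0)); // bind to an ephemeral port for the demo

            Thread acceptor = new Thread(() -> {
                try {
                    server.accept(); // blocks until a connection arrives or the channel is closed
                } catch (AsynchronousCloseException e) {
                    System.out.println("acceptor: channel closed while blocked in accept()");
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }, "acceptor");

            acceptor.start();
            Thread.sleep(100L); // let the acceptor reach accept()
            server.close();     // shutdown path: close from another thread
            acceptor.join();
        }
    }
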
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:42916, storageID=DS-1173411832-67.195.138.20-42916-1346667868489, infoPort=37378, ipcPort=51549):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 51549
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/03 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1906032289
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1906032289
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/03 10:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 1
    [junit] 12/09/03 10:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 51849
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 1 on 51849: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 0 on 51849: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 2 on 51849: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 51849
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/03 10:32:24 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:47754, storageID=DS-1534103407-67.195.138.20-47754-1346667868099, infoPort=54192, ipcPort=51849):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:47754, storageID=DS-1534103407-67.195.138.20-47754-1346667868099, infoPort=54192, ipcPort=51849):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 51849
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/03 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1464896258
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1464896258
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/03 10:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/09/03 10:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 60146
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 2 on 60146: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 0 on 60146: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 60146
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 1 on 60146: exiting
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/03 10:32:24 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:51600, storageID=DS-88535959-67.195.138.20-51600-1346667867682, infoPort=58939, ipcPort=60146):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:51600, storageID=DS-88535959-67.195.138.20-51600-1346667867682, infoPort=58939, ipcPort=60146):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/03 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 60146
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/03 10:32:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/03 10:32:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId16951099
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId16951099
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/03 10:32:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/03 10:32:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/03 10:32:24 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/03 10:32:24 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 8 Number of transactions batched in Syncs: 155 Number of syncs: 349 SyncTimes(ms): 5215 226
    [junit] 12/09/03 10:32:24 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
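
The "Interrupted Monitor" and ReplicationMonitor messages above show the normal exit path for these namenode background threads: they spend most of their time in Thread.sleep, the shutdown sequence interrupts them, and the InterruptedException is simply logged before the thread returns. A minimal sketch of such a sleep-loop monitor; the names are illustrative, not the Hadoop classes:

    // Illustrative monitor thread that, like DecommissionManager$Monitor above,
    // sleeps in a loop and treats interruption as its shutdown signal.
    public class InterruptibleMonitorSketch {
        public static void main(String[] args) throws InterruptedException {
            Thread monitor = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        // Periodic work would go here (e.g. a decommission-progress check).
                        Thread.sleep(30_000L);
                    }
                } catch (InterruptedException e) {
                    // Matches the log: "Interrupted Monitor ... sleep interrupted".
                    System.out.println("Interrupted Monitor: " + e.getMessage());
                }
            }, "Monitor");

            monitor.start();
            Thread.sleep(100L);  // let the monitor reach its sleep
            monitor.interrupt(); // cluster shutdown interrupts the thread
            monitor.join();
        }
    }
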
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping server on 33842
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 0 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 1 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 2 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 5 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server listener on 33842
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 3 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 4 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 6 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 7 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 8 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO ipc.Server: IPC Server handler 9 on 33842: exiting
    [junit] 12/09/03 10:32:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/03 10:32:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 470.301 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/03 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/03 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/03 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/03 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/03 10:32:25 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/03 10:32:25 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/03 10:32:25 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.236 sec
   [delete] Deleting directory /tmp/pig_junit_tmp240045692

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:810: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:878: Tests failed!

Total time: 19 minutes 38 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1309

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1309/changes>

Changes:

[dvryaboy] PIG-2886: Add Scan TimeRange to HBaseStorage

------------------------------------------
[...truncated 35785 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 2
    [junit] 12/09/02 22:55:55 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/02 22:55:55 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/02 22:55:55 INFO ipc.Server: Stopping server on 60589
    [junit] 12/09/02 22:55:55 INFO ipc.Server: IPC Server handler 0 on 60589: exiting
    [junit] 12/09/02 22:55:55 INFO ipc.Server: IPC Server handler 1 on 60589: exiting
    [junit] 12/09/02 22:55:55 INFO ipc.Server: IPC Server handler 2 on 60589: exiting
    [junit] 12/09/02 22:55:55 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:55 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/02 22:55:55 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/02 22:55:55 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:48949, storageID=DS-369295377-67.195.138.24-48949-1346625811301, infoPort=41941, ipcPort=60589):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/02 22:55:55 INFO ipc.Server: Stopping IPC Server listener on 60589
    [junit] 12/09/02 22:55:55 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/02 22:55:55 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/02 22:55:55 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:48949, storageID=DS-369295377-67.195.138.24-48949-1346625811301, infoPort=41941, ipcPort=60589):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/09/02 22:55:55 INFO ipc.Server: Stopping server on 60589
    [junit] 12/09/02 22:55:55 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:55 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:55 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/02 22:55:55 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/02 22:55:56 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:56 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1208376097
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1208376097
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] Shutting down DataNode 1
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/02 22:55:56 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/02 22:55:56 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/02 22:55:56 INFO ipc.Server: Stopping server on 46192
    [junit] 12/09/02 22:55:56 INFO ipc.Server: IPC Server handler 1 on 46192: exiting
    [junit] 12/09/02 22:55:56 INFO ipc.Server: Stopping IPC Server listener on 46192
    [junit] 12/09/02 22:55:56 INFO ipc.Server: IPC Server handler 0 on 46192: exiting
    [junit] 12/09/02 22:55:56 INFO ipc.Server: IPC Server handler 2 on 46192: exiting
    [junit] 12/09/02 22:55:56 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:56 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/02 22:55:56 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/02 22:55:56 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:46358, storageID=DS-1066715473-67.195.138.24-46358-1346625810214, infoPort=41446, ipcPort=46192):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/02 22:55:56 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/02 22:55:57 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:46358 to delete  blk_-5156259703248762711_1095 blk_2323419882001566240_1102 blk_6539147306514741492_1102
    [junit] 12/09/02 22:55:57 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:48949 to delete  blk_-3696902671695355799_1101 blk_2323419882001566240_1102 blk_6539147306514741492_1102
    [junit] 12/09/02 22:55:57 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/02 22:55:57 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:57 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:46358, storageID=DS-1066715473-67.195.138.24-46358-1346625810214, infoPort=41446, ipcPort=46192):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/09/02 22:55:57 INFO ipc.Server: Stopping server on 46192
    [junit] 12/09/02 22:55:57 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:57 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:57 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/02 22:55:57 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/02 22:55:57 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId76222524
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId76222524
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 0
    [junit] 12/09/02 22:55:57 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/02 22:55:57 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/02 22:55:57 INFO ipc.Server: Stopping server on 42314
    [junit] 12/09/02 22:55:57 INFO ipc.Server: IPC Server handler 0 on 42314: exiting
    [junit] 12/09/02 22:55:57 INFO ipc.Server: IPC Server handler 2 on 42314: exiting
    [junit] 12/09/02 22:55:57 INFO ipc.Server: IPC Server handler 1 on 42314: exiting
    [junit] 12/09/02 22:55:57 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/02 22:55:57 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:57 INFO ipc.Server: Stopping IPC Server listener on 42314
    [junit] 12/09/02 22:55:57 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/09/02 22:55:57 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:44391, storageID=DS-1455941524-67.195.138.24-44391-1346625809232, infoPort=50103, ipcPort=42314):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/09/02 22:55:57 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/09/02 22:55:58 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/09/02 22:55:58 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:58 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:44391, storageID=DS-1455941524-67.195.138.24-44391-1346625809232, infoPort=50103, ipcPort=42314):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/09/02 22:55:58 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/02 22:55:58 INFO ipc.Server: Stopping server on 42314
    [junit] 12/09/02 22:55:58 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:58 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/09/02 22:55:58 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/09/02 22:55:58 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/09/02 22:55:58 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1997692618
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1997692618
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/09/02 22:55:58 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/09/02 22:55:58 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/09/02 22:55:58 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/09/02 22:55:58 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/09/02 22:55:58 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 5Number of transactions batched in Syncs: 168 Number of syncs: 348 SyncTimes(ms): 3751 244 
    [junit] 12/09/02 22:55:58 INFO ipc.Server: Stopping server on 42878
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 0 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 8 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 5 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 9 on 42878: exiting
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 738.115 sec
    [junit] 12/09/02 22:55:58 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/09/02 22:55:58 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 1 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 4 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 2 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: Stopping IPC Server listener on 42878
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 7 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 6 on 42878: exiting
    [junit] 12/09/02 22:55:58 INFO ipc.Server: IPC Server handler 3 on 42878: exiting
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/09/02 22:56:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/09/02 22:56:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/02 22:56:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/09/02 22:56:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/09/02 22:56:00 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/09/02 22:56:00 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/09/02 22:56:00 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.444 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1620004064

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:810: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:878: Tests failed!

Total time: 29 minutes 5 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
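
The "No logger object provided to UDF" lines from the TestStringUDFs run above are produced by EvalFunc.warn() falling back to its own commons-logging logger when no PigLogger is attached, which is the normal situation in a standalone JUnit run: the string UDF catches the bad index or null input, warns, and returns null instead of failing the task. A minimal sketch of that warn-and-return-null pattern follows; it is an illustrative UDF written against the public EvalFunc.warn(String, Enum) and PigWarning APIs, not the actual org.apache.pig.builtin.SUBSTRING source.

    // Illustrative sketch only -- shows the pattern behind the WARN builtin.SUBSTRING
    // lines above; the real org.apache.pig.builtin.SUBSTRING differs in detail.
    package org.example.pig;  // hypothetical package, not part of Pig

    import org.apache.pig.EvalFunc;
    import org.apache.pig.PigWarning;
    import org.apache.pig.data.Tuple;

    public class SubstringLikeUdf extends EvalFunc<String> {
        @Override
        public String exec(Tuple input) {
            try {
                String source = (String) input.get(0);
                int begin = (Integer) input.get(1);
                int end = (Integer) input.get(2);
                return source.substring(begin, end);   // may throw for out-of-range indices
            } catch (Exception e) {                    // NullPointerException, StringIndexOutOfBoundsException, ...
                // With no PigLogger attached (plain JUnit), EvalFunc.warn() logs
                // "No logger object provided to UDF: <class>. <message>" and the UDF returns null.
                warn(e.toString(), PigWarning.UDF_WARNING_1);
                return null;
            }
        }
    }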

Build failed in Jenkins: Pig-trunk #1308

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1308/changes>

Changes:

[julien] PIG-2896  Pig does not fail anymore if two macros are declared with the same name

[thejas] PIG-2895: jodatime jar missing in pig-withouthadoop.jar  (thejas)

[dvryaboy] PIG-2888: Improve performance of POPartialAgg

[gates] PIG-2893 fix DBStorage compile issue

------------------------------------------
[...truncated 6512 lines...]
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.joda.time.Months
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.apache.hadoop.security.UserGroupInformation
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.joda.time.ReadableInstant
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.joda.time.format.DateTimeFormat
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.joda.time.Period
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.joda.time.DateTime
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.joda.time.Years
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.joda.time.DateTimeZone
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 42
 [findbugs] Missing classes: 282
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 24 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1307

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1307/changes>

Changes:

[thejas] removing JobControlCompiler.java.orig that was committed unintentionally as part of PIG-1314 changes

------------------------------------------
[...truncated 35827 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] Shutting down DataNode 2
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/29 10:32:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/29 10:32:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping server on 56714
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 0 on 56714: exiting
    [junit] 12/08/29 10:32:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 2 on 56714: exiting
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping IPC Server listener on 56714
    [junit] 12/08/29 10:32:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60763, storageID=DS-139338274-67.195.138.20-60763-1346235901246, infoPort=37563, ipcPort=56714):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 1 on 56714: exiting
    [junit] 12/08/29 10:32:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60763, storageID=DS-139338274-67.195.138.20-60763-1346235901246, infoPort=37563, ipcPort=56714):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping server on 56714
    [junit] 12/08/29 10:32:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/29 10:32:42 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/29 10:32:42 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/29 10:32:42 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1028645147
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1028645147
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/29 10:32:42 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/29 10:32:42 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping server on 55053
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 1 on 55053: exiting
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 0 on 55053: exiting
    [junit] 12/08/29 10:32:42 INFO ipc.Server: IPC Server handler 2 on 55053: exiting
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping IPC Server listener on 55053
    [junit] 12/08/29 10:32:42 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:42 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/29 10:32:42 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34282, storageID=DS-2033667451-67.195.138.20-34282-1346235900862, infoPort=36173, ipcPort=55053):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/29 10:32:42 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/29 10:32:42 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120829102501716_0012
    [junit] 12/08/29 10:32:42 WARN mapred.TaskTracker: Unknown job job_20120829102501716_0012 being deleted.
    [junit] 12/08/29 10:32:42 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120829102501716_0012
    [junit] 12/08/29 10:32:42 WARN mapred.TaskTracker: Unknown job job_20120829102501716_0012 being deleted.
    [junit] 12/08/29 10:32:42 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/29 10:32:43 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34282, storageID=DS-2033667451-67.195.138.20-34282-1346235900862, infoPort=36173, ipcPort=55053):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/29 10:32:43 INFO ipc.Server: Stopping server on 55053
    [junit] 12/08/29 10:32:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/29 10:32:43 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/29 10:32:43 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/29 10:32:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/29 10:32:43 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-459042572
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-459042572
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/29 10:32:43 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/29 10:32:43 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/29 10:32:43 INFO ipc.Server: Stopping server on 48425
    [junit] 12/08/29 10:32:43 INFO ipc.Server: IPC Server handler 0 on 48425: exiting
    [junit] 12/08/29 10:32:43 INFO ipc.Server: IPC Server handler 2 on 48425: exiting
    [junit] 12/08/29 10:32:43 INFO ipc.Server: IPC Server handler 1 on 48425: exiting
    [junit] 12/08/29 10:32:43 INFO ipc.Server: Stopping IPC Server listener on 48425
    [junit] 12/08/29 10:32:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:43 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/29 10:32:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/29 10:32:43 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60348, storageID=DS-1196540904-67.195.138.20-60348-1346235900445, infoPort=35856, ipcPort=48425):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/29 10:32:43 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/29 10:32:44 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:60348 to delete  blk_-2807317286965277449_1095 blk_45262638907299986_1101 blk_-7937264573005326398_1102 blk_798867963620222208_1102
    [junit] 12/08/29 10:32:44 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:34282 to delete  blk_-2807317286965277449_1095 blk_45262638907299986_1101 blk_-7937264573005326398_1102
    [junit] 12/08/29 10:32:44 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/29 10:32:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/29 10:32:44 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60348, storageID=DS-1196540904-67.195.138.20-60348-1346235900445, infoPort=35856, ipcPort=48425):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/29 10:32:44 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/29 10:32:44 INFO ipc.Server: Stopping server on 48425
    [junit] 12/08/29 10:32:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/29 10:32:44 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/29 10:32:44 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/29 10:32:44 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId234781259
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId234781259
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/29 10:32:44 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/29 10:32:44 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/29 10:32:44 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/29 10:32:44 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/29 10:32:44 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 9Number of transactions batched in Syncs: 148 Number of syncs: 348 SyncTimes(ms): 3069 258 
    [junit] 12/08/29 10:32:44 INFO ipc.Server: Stopping server on 54949
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 0 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 1 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 2 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 3 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 4 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 5 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 6 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 7 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 8 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: IPC Server handler 9 on 54949: exiting
    [junit] 12/08/29 10:32:44 INFO ipc.Server: Stopping IPC Server listener on 54949
    [junit] 12/08/29 10:32:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/29 10:32:44 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 457.431 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/29 10:32:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/29 10:32:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/29 10:32:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/29 10:32:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/29 10:32:45 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/29 10:32:45 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/29 10:32:45 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.236 sec
   [delete] Deleting directory /tmp/pig_junit_tmp671510274

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 19 minutes 25 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
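
The repeated javax.management.InstanceNotFoundException stack traces in this build come from the TestStore class-level teardown: oneTimeTearDown shuts the mini MapReduce and DFS clusters down, and as each DataNode stops, Hadoop's MBeans.unregister logs a WARN for beans that are already gone. They are shutdown noise rather than the three failures and three errors TestStore actually reports. A minimal sketch of that teardown shape, mirroring the call chain in the traces, follows; the cluster factory call is an assumption added for completeness, and the real TestStore setup differs.

    // Illustrative sketch of the @AfterClass teardown path seen in the traces above
    // (oneTimeTearDown -> MiniGenericCluster.shutDown -> MiniDFSCluster.shutdown).
    package org.example.pig.test;  // hypothetical package, not part of Pig

    import org.apache.pig.test.MiniGenericCluster;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterTeardownSketch {
        private static MiniGenericCluster cluster;

        @BeforeClass
        public static void oneTimeSetUp() throws Exception {
            cluster = MiniGenericCluster.buildCluster();  // assumed factory method; real tests may differ
        }

        @AfterClass
        public static void oneTimeTearDown() {
            // Stops the MR and DFS mini-clusters; during DataNode shutdown Hadoop's
            // MBeans.unregister can log the benign InstanceNotFoundException WARNs above.
            cluster.shutDown();
        }
    }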

Build failed in Jenkins: Pig-trunk #1306

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1306/changes>

Changes:

[daijy] PIG-2708: split MiniCluster based tests out of org.apache.pig.test.TestInputOutputFileValidator

[daijy] PIG-2821:  HBaseStorage should work with secure hbase

------------------------------------------
[...truncated 35898 lines...]
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/27 23:42:22 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/08/27 23:42:22 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/27 23:42:22 INFO ipc.Server: Stopping server on 55372
    [junit] 12/08/27 23:42:22 INFO ipc.Server: IPC Server handler 2 on 55372: exiting
    [junit] 12/08/27 23:42:22 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/27 23:42:22 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/27 23:42:22 INFO ipc.Server: IPC Server handler 1 on 55372: exiting
    [junit] 12/08/27 23:42:22 INFO ipc.Server: IPC Server handler 0 on 55372: exiting
    [junit] 12/08/27 23:42:22 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34243, storageID=DS-1532624747-67.195.138.24-34243-1346110238303, infoPort=44328, ipcPort=55372):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/27 23:42:22 INFO ipc.Server: Stopping IPC Server listener on 55372
    [junit] 12/08/27 23:42:22 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Scheduling block blk_-1622806193096722258_1101 file build/test/data/dfs/data/data3/current/blk_-1622806193096722258 for deletion
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Scheduling block blk_3674274575908176342_1102 file build/test/data/dfs/data/data4/current/blk_3674274575908176342 for deletion
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Scheduling block blk_6014977156259038012_1095 file build/test/data/dfs/data/data4/current/blk_6014977156259038012 for deletion
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Deleted block blk_3674274575908176342_1102 at file build/test/data/dfs/data/data4/current/blk_3674274575908176342
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Deleted block blk_6014977156259038012_1095 at file build/test/data/dfs/data/data4/current/blk_6014977156259038012
    [junit] 12/08/27 23:42:22 INFO datanode.DataNode: Deleted block blk_-1622806193096722258_1101 at file build/test/data/dfs/data/data3/current/blk_-1622806193096722258
    [junit] 12/08/27 23:42:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:23 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34243, storageID=DS-1532624747-67.195.138.24-34243-1346110238303, infoPort=44328, ipcPort=55372):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/27 23:42:23 INFO ipc.Server: Stopping server on 55372
    [junit] 12/08/27 23:42:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:23 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/27 23:42:23 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/27 23:42:23 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1795913186
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1795913186
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 1
    [junit] 12/08/27 23:42:23 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/27 23:42:23 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/27 23:42:23 INFO ipc.Server: Stopping server on 41693
    [junit] 12/08/27 23:42:23 INFO ipc.Server: Stopping IPC Server listener on 41693
    [junit] 12/08/27 23:42:23 INFO ipc.Server: IPC Server handler 0 on 41693: exiting
    [junit] 12/08/27 23:42:23 INFO ipc.Server: IPC Server handler 1 on 41693: exiting
    [junit] 12/08/27 23:42:23 INFO ipc.Server: IPC Server handler 2 on 41693: exiting
    [junit] 12/08/27 23:42:23 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:23 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/27 23:42:23 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/27 23:42:23 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:35216, storageID=DS-1611853691-67.195.138.24-35216-1346110237321, infoPort=51275, ipcPort=41693):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/27 23:42:23 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/27 23:42:23 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/27 23:42:24 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:34243 to delete  blk_5605318940028220558_1102 blk_3674274575908176342_1102 blk_-1622806193096722258_1101
    [junit] 12/08/27 23:42:24 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:47320 to delete  blk_5605318940028220558_1102 blk_6014977156259038012_1095
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:35216, storageID=DS-1611853691-67.195.138.24-35216-1346110237321, infoPort=51275, ipcPort=41693):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/27 23:42:24 INFO ipc.Server: Stopping server on 41693
    [junit] 12/08/27 23:42:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/27 23:42:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/27 23:42:24 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId847695191
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId847695191
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 0
    [junit] 12/08/27 23:42:24 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/27 23:42:24 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/27 23:42:24 INFO ipc.Server: Stopping server on 49375
    [junit] 12/08/27 23:42:24 INFO ipc.Server: IPC Server handler 0 on 49375: exiting
    [junit] 12/08/27 23:42:24 INFO ipc.Server: IPC Server handler 2 on 49375: exiting
    [junit] 12/08/27 23:42:24 INFO ipc.Server: Stopping IPC Server listener on 49375
    [junit] 12/08/27 23:42:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:24 INFO ipc.Server: IPC Server handler 1 on 49375: exiting
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/27 23:42:24 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:58480, storageID=DS-827135619-67.195.138.24-58480-1346110236288, infoPort=42366, ipcPort=49375):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/27 23:42:24 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:58480, storageID=DS-827135619-67.195.138.24-58480-1346110236288, infoPort=42366, ipcPort=49375):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/27 23:42:24 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/27 23:42:24 INFO ipc.Server: Stopping server on 49375
    [junit] 12/08/27 23:42:24 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:24 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:24 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/27 23:42:24 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/27 23:42:24 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/27 23:42:25 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/27 23:42:25 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-459874824
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-459874824
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/27 23:42:25 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/27 23:42:25 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/27 23:42:25 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 8Number of transactions batched in Syncs: 160 Number of syncs: 349 SyncTimes(ms): 3353 232 
    [junit] 12/08/27 23:42:25 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/27 23:42:25 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/27 23:42:25 INFO ipc.Server: Stopping server on 40987
    [junit] 12/08/27 23:42:25 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 5 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 0 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/27 23:42:25 INFO ipc.Server: Stopping IPC Server listener on 40987
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 4 on 40987: exiting
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 697.714 sec
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 6 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 8 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 1 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 2 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 7 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 9 on 40987: exiting
    [junit] 12/08/27 23:42:25 INFO ipc.Server: IPC Server handler 3 on 40987: exiting
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/27 23:42:27 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/27 23:42:27 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/27 23:42:27 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/27 23:42:27 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/27 23:42:27 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/27 23:42:27 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/27 23:42:27 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.495 sec
   [delete] Deleting directory /tmp/pig_junit_tmp916267120

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 28 minutes 18 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
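
A note on the recurring util.MBeans warnings in the TestStore teardown above: javax.management.InstanceNotFoundException is what the JDK MBeanServer throws when unregisterMBean() is called for a name that is not (or is no longer) registered, and the stack traces show Hadoop's MBeans.unregister() catching it and logging it at WARN during MiniDFSCluster shutdown, which suggests it is teardown noise rather than the cause of the three TestStore failures (the log alone does not prove that). A minimal sketch of that JMX behaviour, using only the JDK; the bean and ObjectName below are invented for illustration and are not the Hadoop ones from the log:

    import java.lang.management.ManagementFactory;
    import javax.management.InstanceNotFoundException;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class UnregisterTwiceDemo {
        // A standard MBean needs a public interface named <ImplClass>MBean.
        public interface DemoMBean { int getValue(); }
        public static class Demo implements DemoMBean {
            public int getValue() { return 42; }
        }

        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Hypothetical name, loosely modelled on the ones in the log.
            ObjectName name = new ObjectName("Demo:service=DataNodeLike,name=Example");

            server.registerMBean(new Demo(), name);
            server.unregisterMBean(name);       // first unregister succeeds

            try {
                server.unregisterMBean(name);   // bean is already gone
            } catch (InstanceNotFoundException e) {
                // This is the exception the teardown stack traces show being logged.
                System.out.println("as in the log: " + e);
            }
        }
    }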

Build failed in Jenkins: Pig-trunk #1305

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1305/changes>

Changes:

[dvryaboy] PIG-2890: Revert PIG-2578

[dvryaboy] PIG-2850: Pig should support loading macro files as resources stored in JAR files

[thejas] PIG-1314: Add DateTime Support to Pig (zjshen via thejas)

------------------------------------------
[...truncated 38468 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] Shutting down DataNode 2
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/24 10:32:37 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/24 10:32:37 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping server on 40962
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 0 on 40962: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 2 on 40962: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 1 on 40962: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping IPC Server listener on 40962
    [junit] 12/08/24 10:32:37 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/24 10:32:37 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:42636, storageID=DS-1869401989-67.195.138.20-42636-1345803902336, infoPort=49159, ipcPort=40962):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/24 10:32:37 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:42636, storageID=DS-1869401989-67.195.138.20-42636-1345803902336, infoPort=49159, ipcPort=40962):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping server on 40962
    [junit] 12/08/24 10:32:37 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/24 10:32:37 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/24 10:32:37 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/24 10:32:37 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1707649330
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1707649330
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/24 10:32:37 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/24 10:32:37 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping server on 39888
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 1 on 39888: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 0 on 39888: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: IPC Server handler 2 on 39888: exiting
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping IPC Server listener on 39888
    [junit] 12/08/24 10:32:37 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:37 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/24 10:32:37 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49736, storageID=DS-1560659827-67.195.138.20-49736-1345803901937, infoPort=40200, ipcPort=39888):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/24 10:32:37 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/24 10:32:37 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120824102502830_0012
    [junit] 12/08/24 10:32:37 WARN mapred.TaskTracker: Unknown job job_20120824102502830_0012 being deleted.
    [junit] 12/08/24 10:32:38 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/24 10:32:38 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49736, storageID=DS-1560659827-67.195.138.20-49736-1345803901937, infoPort=40200, ipcPort=39888):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/24 10:32:38 INFO ipc.Server: Stopping server on 39888
    [junit] 12/08/24 10:32:38 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:38 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/24 10:32:38 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/24 10:32:38 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/24 10:32:38 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120824102502830_0012
    [junit] 12/08/24 10:32:38 WARN mapred.TaskTracker: Unknown job job_20120824102502830_0012 being deleted.
    [junit] 12/08/24 10:32:38 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/24 10:32:38 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1971416618
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1971416618
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/24 10:32:38 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/08/24 10:32:38 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/24 10:32:38 INFO ipc.Server: Stopping server on 53767
    [junit] 12/08/24 10:32:38 INFO ipc.Server: IPC Server handler 0 on 53767: exiting
    [junit] 12/08/24 10:32:38 INFO ipc.Server: IPC Server handler 2 on 53767: exiting
    [junit] 12/08/24 10:32:38 INFO ipc.Server: IPC Server handler 1 on 53767: exiting
    [junit] 12/08/24 10:32:38 INFO ipc.Server: Stopping IPC Server listener on 53767
    [junit] 12/08/24 10:32:38 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/24 10:32:38 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:38 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/24 10:32:38 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:56811, storageID=DS-2130199663-67.195.138.20-56811-1345803901507, infoPort=46697, ipcPort=53767):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/24 10:32:38 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/24 10:32:39 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:49736 to delete  blk_-1281551604565521163_1101
    [junit] 12/08/24 10:32:39 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:56811 to delete  blk_-7135034011129547939_1102 blk_5791569289701386231_1095 blk_-2888495380701777565_1102
    [junit] 12/08/24 10:32:39 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/24 10:32:39 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/24 10:32:39 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:56811, storageID=DS-2130199663-67.195.138.20-56811-1345803901507, infoPort=46697, ipcPort=53767):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/24 10:32:39 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/24 10:32:39 INFO ipc.Server: Stopping server on 53767
    [junit] 12/08/24 10:32:39 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:39 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/24 10:32:39 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/24 10:32:39 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/24 10:32:39 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1285310668
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1285310668
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:132)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/24 10:32:39 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/24 10:32:39 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/24 10:32:39 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/24 10:32:39 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/24 10:32:39 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 7Number of transactions batched in Syncs: 150 Number of syncs: 349 SyncTimes(ms): 5218 206 
    [junit] 12/08/24 10:32:39 INFO ipc.Server: Stopping server on 47020
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 0 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 1 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 2 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 4 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 3 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 5 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 6 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 8 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: Stopping IPC Server listener on 47020
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 7 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO ipc.Server: IPC Server handler 9 on 47020: exiting
    [junit] 12/08/24 10:32:39 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/24 10:32:39 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 451.609 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/24 10:32:40 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/24 10:32:40 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/24 10:32:40 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/24 10:32:40 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/24 10:32:40 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/24 10:32:40 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/24 10:32:40 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.228 sec
   [delete] Deleting directory /tmp/pig_junit_tmp2105234032

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 7 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
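
On the builtin.SUBSTRING warnings that precede each TestStringUDFs run: the messages "String index out of range: -2", "-1" and "-8" match the wording the JDK of that era used when String.substring(begin, end) is called with end smaller than begin, the negative number being end minus begin; the "No logger object provided to UDF" prefix presumably appears because the test calls the UDF directly, outside a full Pig runtime, so the exception is reported through this fallback path. A small illustrative sketch of the underlying String behaviour (plain JDK; the indices are invented, not taken from the test):

    public class SubstringRangeDemo {
        public static void main(String[] args) {
            String s = "pig";
            try {
                // end (1) < begin (3): the range length is 1 - 3 = -2.
                s.substring(3, 1);
            } catch (StringIndexOutOfBoundsException e) {
                // On the JDK 6/7-era runtime of this build the message reads
                // "String index out of range: -2"; newer JDKs word it differently.
                System.out.println(e.getMessage());
            }
        }
    }

Since TestStringUDFs itself reports "Tests run: 11, Failures: 0, Errors: 0", these warnings are likely deliberate negative-case coverage rather than new failures.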

Build failed in Jenkins: Pig-trunk #1304

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1304/changes>

Changes:

[julien] PIG-2848: TestBuiltInBagToTupleOrString fails now that mock.Storage enforces not overwriting output (julien)

------------------------------------------
[...truncated 36576 lines...]
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] Shutting down DataNode 2
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/23 22:32:43 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/23 22:32:43 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/23 22:32:43 INFO ipc.Server: Stopping server on 33025
    [junit] 12/08/23 22:32:43 INFO ipc.Server: Stopping IPC Server listener on 33025
    [junit] 12/08/23 22:32:43 INFO ipc.Server: IPC Server handler 0 on 33025: exiting
    [junit] 12/08/23 22:32:43 INFO ipc.Server: IPC Server handler 1 on 33025: exiting
    [junit] 12/08/23 22:32:43 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:43 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/23 22:32:43 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/23 22:32:43 INFO ipc.Server: IPC Server handler 2 on 33025: exiting
    [junit] 12/08/23 22:32:43 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60748, storageID=DS-1471663223-67.195.138.20-60748-1345760712835, infoPort=38391, ipcPort=33025):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/23 22:32:43 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/23 22:32:43 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:57428 to delete  blk_-4149032668565847939_1078 blk_-1353532049794229177_1073
    [junit] 12/08/23 22:32:43 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:45131 to delete  blk_-4149032668565847939_1078
    [junit] 12/08/23 22:32:43 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/23 22:32:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:44 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60748, storageID=DS-1471663223-67.195.138.20-60748-1345760712835, infoPort=38391, ipcPort=33025):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/23 22:32:44 INFO ipc.Server: Stopping server on 33025
    [junit] 12/08/23 22:32:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:44 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/23 22:32:44 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/23 22:32:44 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId261759807
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId261759807
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] Shutting down DataNode 1
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/23 22:32:44 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/23 22:32:44 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/23 22:32:44 INFO ipc.Server: Stopping server on 57471
    [junit] 12/08/23 22:32:44 INFO ipc.Server: IPC Server handler 0 on 57471: exiting
    [junit] 12/08/23 22:32:44 INFO ipc.Server: IPC Server handler 2 on 57471: exiting
    [junit] 12/08/23 22:32:44 INFO ipc.Server: IPC Server handler 1 on 57471: exiting
    [junit] 12/08/23 22:32:44 INFO ipc.Server: Stopping IPC Server listener on 57471
    [junit] 12/08/23 22:32:44 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:44 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/23 22:32:44 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/23 22:32:44 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:45131, storageID=DS-1461682372-67.195.138.20-45131-1345760712445, infoPort=35150, ipcPort=57471):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/23 22:32:44 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Scheduling block blk_-4149032668565847939_1078 file build/test/data/dfs/data/data2/current/blk_-4149032668565847939 for deletion
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Scheduling block blk_-1353532049794229177_1073 file build/test/data/dfs/data/data2/current/blk_-1353532049794229177 for deletion
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Deleted block blk_-4149032668565847939_1078 at file build/test/data/dfs/data/data2/current/blk_-4149032668565847939
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Deleted block blk_-1353532049794229177_1073 at file build/test/data/dfs/data/data2/current/blk_-1353532049794229177
    [junit] 12/08/23 22:32:45 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:45131, storageID=DS-1461682372-67.195.138.20-45131-1345760712445, infoPort=35150, ipcPort=57471):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/23 22:32:45 INFO ipc.Server: Stopping server on 57471
    [junit] 12/08/23 22:32:45 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:45 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/23 22:32:45 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:45 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1864756127
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1864756127
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] Shutting down DataNode 0
    [junit] 12/08/23 22:32:45 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/23 22:32:45 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/23 22:32:45 INFO ipc.Server: Stopping server on 48700
    [junit] 12/08/23 22:32:45 INFO ipc.Server: IPC Server handler 0 on 48700: exiting
    [junit] 12/08/23 22:32:45 INFO ipc.Server: IPC Server handler 2 on 48700: exiting
    [junit] 12/08/23 22:32:45 INFO ipc.Server: Stopping IPC Server listener on 48700
    [junit] 12/08/23 22:32:45 INFO ipc.Server: IPC Server handler 1 on 48700: exiting
    [junit] 12/08/23 22:32:45 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/23 22:32:45 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/23 22:32:45 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:57428, storageID=DS-1013635987-67.195.138.20-57428-1345760711997, infoPort=33893, ipcPort=48700):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/23 22:32:45 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/23 22:32:46 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/23 22:32:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:60748 to delete  blk_-1353532049794229177_1073
    [junit] 12/08/23 22:32:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:39330 to delete  blk_-4149032668565847939_1078 blk_-1353532049794229177_1073
    [junit] 12/08/23 22:32:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:46 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:57428, storageID=DS-1013635987-67.195.138.20-57428-1345760711997, infoPort=33893, ipcPort=48700):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/23 22:32:46 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/23 22:32:46 INFO ipc.Server: Stopping server on 48700
    [junit] 12/08/23 22:32:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/23 22:32:46 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/23 22:32:46 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/23 22:32:46 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-25511417
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-25511417
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/23 22:32:46 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/23 22:32:46 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/23 22:32:46 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/23 22:32:46 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/23 22:32:46 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 4 Number of transactions batched in Syncs: 147 Number of syncs: 289 SyncTimes(ms): 4104 283
    [junit] 12/08/23 22:32:46 INFO ipc.Server: Stopping server on 59520
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 0 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 1 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 2 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 3 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 5 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 4 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 9 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 8 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 7 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: IPC Server handler 6 on 59520: exiting
    [junit] 12/08/23 22:32:46 INFO ipc.Server: Stopping IPC Server listener on 59520
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 448.162 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/23 22:32:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/23 22:32:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/23 22:32:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/23 22:32:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/23 22:32:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/23 22:32:48 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/23 22:32:48 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.242 sec
   [delete] Deleting directory /tmp/pig_junit_tmp378869161

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 19 minutes 59 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1303

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1303/changes>

Changes:

[sms] PIG-2785: NoClassDefFoundError after upgrading to pig 0.10.0 from 0.9.0 (matterhayes via sms)

------------------------------------------
[...truncated 38402 lines...]
    [junit] Shutting down DataNode 2
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/22 10:32:07 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/22 10:32:07 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/22 10:32:07 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:34901 to delete  blk_7601099318539377849_1073 blk_-7694329299919852436_1078
    [junit] 12/08/22 10:32:07 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:56063 to delete  blk_-7694329299919852436_1078
    [junit] 12/08/22 10:32:07 INFO ipc.Server: Stopping server on 47428
    [junit] 12/08/22 10:32:07 INFO ipc.Server: Stopping IPC Server listener on 47428
    [junit] 12/08/22 10:32:07 INFO ipc.Server: IPC Server handler 2 on 47428: exiting
    [junit] 12/08/22 10:32:07 INFO ipc.Server: IPC Server handler 0 on 47428: exiting
    [junit] 12/08/22 10:32:07 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:07 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/22 10:32:07 INFO ipc.Server: IPC Server handler 1 on 47428: exiting
    [junit] 12/08/22 10:32:07 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/22 10:32:07 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34901, storageID=DS-1913447332-67.195.138.20-34901-1345631067245, infoPort=46324, ipcPort=47428):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/22 10:32:07 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/22 10:32:07 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/22 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:08 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34901, storageID=DS-1913447332-67.195.138.20-34901-1345631067245, infoPort=46324, ipcPort=47428):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/22 10:32:08 INFO ipc.Server: Stopping server on 47428
    [junit] 12/08/22 10:32:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:08 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/22 10:32:08 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/22 10:32:08 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId210389432
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId210389432
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] Shutting down DataNode 1
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/22 10:32:08 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/22 10:32:08 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/22 10:32:08 INFO ipc.Server: Stopping server on 45144
    [junit] 12/08/22 10:32:08 INFO ipc.Server: IPC Server handler 0 on 45144: exiting
    [junit] 12/08/22 10:32:08 INFO ipc.Server: IPC Server handler 2 on 45144: exiting
    [junit] 12/08/22 10:32:08 INFO ipc.Server: Stopping IPC Server listener on 45144
    [junit] 12/08/22 10:32:08 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/22 10:32:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:08 INFO ipc.Server: IPC Server handler 1 on 45144: exiting
    [junit] 12/08/22 10:32:08 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:33499, storageID=DS-1085836420-67.195.138.20-33499-1345631066868, infoPort=48983, ipcPort=45144):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/22 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/22 10:32:08 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/22 10:32:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:33499, storageID=DS-1085836420-67.195.138.20-33499-1345631066868, infoPort=48983, ipcPort=45144):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping server on 45144
    [junit] 12/08/22 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:09 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/22 10:32:09 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1634050607
    [junit] Shutting down DataNode 0
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1634050607
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/22 10:32:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/22 10:32:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping server on 58779
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 0 on 58779: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 2 on 58779: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 1 on 58779: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping IPC Server listener on 58779
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/22 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:09 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:35125, storageID=DS-2132405853-67.195.138.20-35125-1345631066443, infoPort=60813, ipcPort=58779):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:09 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:35125, storageID=DS-2132405853-67.195.138.20-35125-1345631066443, infoPort=60813, ipcPort=58779):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/22 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping server on 58779
    [junit] 12/08/22 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/22 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/22 10:32:09 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/22 10:32:09 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/22 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-223680784
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-223680784
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/22 10:32:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/22 10:32:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/22 10:32:09 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/22 10:32:09 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/22 10:32:09 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 8 Number of transactions batched in Syncs: 147 Number of syncs: 288 SyncTimes(ms): 3828 196
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping server on 59197
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 1 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 0 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 3 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 2 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 4 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 5 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 8 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 7 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 6 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: IPC Server handler 9 on 59197: exiting
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping IPC Server listener on 59197
    [junit] 12/08/22 10:32:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/22 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 456.195 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/22 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/22 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/22 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/22 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/22 10:32:10 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/22 10:32:10 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/22 10:32:10 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.228 sec
   [delete] Deleting directory /tmp/pig_junit_tmp781388068

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 22 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1302

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1302/changes>

Changes:

[billgraham] PIG-2884: JobControlCompiler mis-logs after reducer estimation (billgraham)

------------------------------------------
[...truncated 37564 lines...]
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/21 10:32:08 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/21 10:32:08 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 2
    [junit] 12/08/21 10:32:08 INFO ipc.Server: Stopping server on 58536
    [junit] 12/08/21 10:32:08 INFO ipc.Server: Stopping IPC Server listener on 58536
    [junit] 12/08/21 10:32:08 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:08 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/21 10:32:08 INFO ipc.Server: IPC Server handler 2 on 58536: exiting
    [junit] 12/08/21 10:32:08 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/21 10:32:08 INFO ipc.Server: IPC Server handler 1 on 58536: exiting
    [junit] 12/08/21 10:32:08 INFO ipc.Server: IPC Server handler 0 on 58536: exiting
    [junit] 12/08/21 10:32:08 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:54954, storageID=DS-17413196-67.195.138.20-54954-1345544668512, infoPort=60728, ipcPort=58536):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/21 10:32:08 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/21 10:32:08 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:36040 to delete  blk_-8346373089035590904_1073
    [junit] 12/08/21 10:32:08 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:45073 to delete  blk_-8346373089035590904_1073 blk_-7111554569731899084_1078
    [junit] 12/08/21 10:32:08 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:54954, storageID=DS-17413196-67.195.138.20-54954-1345544668512, infoPort=60728, ipcPort=58536):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/21 10:32:09 INFO ipc.Server: Stopping server on 58536
    [junit] 12/08/21 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/21 10:32:09 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/21 10:32:09 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/21 10:32:09 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId111335716
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId111335716
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/21 10:32:09 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/21 10:32:09 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 1
    [junit] 12/08/21 10:32:09 INFO ipc.Server: Stopping server on 43201
    [junit] 12/08/21 10:32:09 INFO ipc.Server: IPC Server handler 1 on 43201: exiting
    [junit] 12/08/21 10:32:09 INFO ipc.Server: IPC Server handler 0 on 43201: exiting
    [junit] 12/08/21 10:32:09 INFO ipc.Server: IPC Server handler 2 on 43201: exiting
    [junit] 12/08/21 10:32:09 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:09 INFO ipc.Server: Stopping IPC Server listener on 43201
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/21 10:32:09 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/21 10:32:09 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:52860, storageID=DS-1006545832-67.195.138.20-52860-1345544668133, infoPort=48688, ipcPort=43201):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Scheduling block blk_-8346373089035590904_1073 file build/test/data/dfs/data/data1/current/blk_-8346373089035590904 for deletion
    [junit] 12/08/21 10:32:09 INFO datanode.DataNode: Deleted block blk_-8346373089035590904_1073 at file build/test/data/dfs/data/data1/current/blk_-8346373089035590904
    [junit] 12/08/21 10:32:10 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:52860, storageID=DS-1006545832-67.195.138.20-52860-1345544668133, infoPort=48688, ipcPort=43201):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping server on 43201
    [junit] 12/08/21 10:32:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/21 10:32:10 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/21 10:32:10 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/21 10:32:10 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId2050778545
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId2050778545
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/21 10:32:10 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/21 10:32:10 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping server on 35558
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 0 on 35558: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 2 on 35558: exiting
    [junit] 12/08/21 10:32:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 1 on 35558: exiting
    [junit] 12/08/21 10:32:10 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:36040, storageID=DS-796494576-67.195.138.20-36040-1345544667732, infoPort=36666, ipcPort=35558):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping IPC Server listener on 35558
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/21 10:32:10 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:36040, storageID=DS-796494576-67.195.138.20-36040-1345544667732, infoPort=36666, ipcPort=35558):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/21 10:32:10 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping server on 35558
    [junit] 12/08/21 10:32:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:10 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/21 10:32:10 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/21 10:32:10 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/21 10:32:10 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1275083204
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1275083204
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/21 10:32:10 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/21 10:32:10 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/21 10:32:10 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/21 10:32:10 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 7 Number of transactions batched in Syncs: 154 Number of syncs: 288 SyncTimes(ms): 2545 332
    [junit] 12/08/21 10:32:10 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping server on 42200
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 0 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 1 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 2 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 3 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 4 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 5 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 6 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 7 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 8 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO ipc.Server: IPC Server handler 9 on 42200: exiting
    [junit] 12/08/21 10:32:10 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping IPC Server listener on 42200
    [junit] 12/08/21 10:32:10 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 456.27 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/21 10:32:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/21 10:32:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/21 10:32:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/21 10:32:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/21 10:32:11 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/21 10:32:11 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/21 10:32:11 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.236 sec
   [delete] Deleting directory /tmp/pig_junit_tmp284269024

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 11 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1301

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1301/changes>

Changes:

[dvryaboy] PIG-2556: CSVExcelStorage load: quoted field with newline as first character sees newline as record end

------------------------------------------
[...truncated 37789 lines...]
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 23:11:45 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/08/20 23:11:45 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 23:11:45 INFO ipc.Server: Stopping server on 45628
    [junit] 12/08/20 23:11:45 INFO ipc.Server: IPC Server handler 1 on 45628: exiting
    [junit] 12/08/20 23:11:45 INFO ipc.Server: IPC Server handler 0 on 45628: exiting
    [junit] 12/08/20 23:11:45 INFO ipc.Server: Stopping IPC Server listener on 45628
    [junit] 12/08/20 23:11:45 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 23:11:45 INFO ipc.Server: IPC Server handler 2 on 45628: exiting
    [junit] 12/08/20 23:11:45 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:45 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/20 23:11:45 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:51639, storageID=DS-1509213729-67.195.138.20-51639-1345503855082, infoPort=37835, ipcPort=45628):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 23:11:45 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 23:11:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:40689 to delete  blk_-7505850391209066335_1073
    [junit] 12/08/20 23:11:46 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:59704 to delete  blk_-7505850391209066335_1073 blk_5038225558055915183_1078
    [junit] 12/08/20 23:11:46 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:51639, storageID=DS-1509213729-67.195.138.20-51639-1345503855082, infoPort=37835, ipcPort=45628):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 45628
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 23:11:46 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-110420357
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-110420357
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 23:11:46 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 23:11:46 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 1
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 38010
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 1 on 38010: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 2 on 38010: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 0 on 38010: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server listener on 38010
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/20 23:11:46 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:40130, storageID=DS-1803478568-67.195.138.20-40130-1345503854708, infoPort=52195, ipcPort=38010):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 23:11:46 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:40130, storageID=DS-1803478568-67.195.138.20-40130-1345503854708, infoPort=52195, ipcPort=38010):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 38010
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 23:11:46 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1948528601
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1948528601
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] Shutting down DataNode 0
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 23:11:46 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 23:11:46 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 58816
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 0 on 58816: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 2 on 58816: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server listener on 58816
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 1 on 58816: exiting
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:59704, storageID=DS-1655005849-67.195.138.20-59704-1345503854301, infoPort=35533, ipcPort=58816):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/20 23:11:46 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:59704, storageID=DS-1655005849-67.195.138.20-59704-1345503854301, infoPort=35533, ipcPort=58816):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/20 23:11:46 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 58816
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 23:11:46 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 23:11:46 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-2117893494
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-2117893494
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 23:11:46 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 23:11:46 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 23:11:46 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/20 23:11:46 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/20 23:11:46 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 13 Number of transactions batched in Syncs: 147 Number of syncs: 288 SyncTimes(ms): 3255 174 
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping server on 41669
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 0 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 1 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 2 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 3 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 4 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 5 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 6 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 7 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 8 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 23:11:46 INFO ipc.Server: IPC Server handler 9 on 41669: exiting
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server listener on 41669
    [junit] 12/08/20 23:11:46 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 445.939 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/20 23:11:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/20 23:11:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/20 23:11:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/20 23:11:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/20 23:11:48 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/20 23:11:48 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/20 23:11:48 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.227 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1727400344

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 9 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1300

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1300/changes>

Changes:

[sms] PIG-2875: Add recursive record support to AvroStorage (cheolsoo via sms)
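
"Recursive record" here means an Avro record schema with a field that refers back to the record's own type (a linked-list or tree shape). The snippet below only illustrates such a schema, assuming the Apache Avro library on the classpath; the schema itself is a made-up example and it does not exercise AvroStorage's own options.

    // A made-up recursive Avro record: "Node" contains an optional "next" Node.
    // This only shows the schema shape PIG-2875 concerns; it does not call AvroStorage.
    import org.apache.avro.Schema;

    public class RecursiveRecordSchemaSketch {
        public static void main(String[] args) {
            String json =
                "{\"type\":\"record\",\"name\":\"Node\",\"fields\":["
                + "{\"name\":\"value\",\"type\":\"int\"},"
                + "{\"name\":\"next\",\"type\":[\"null\",\"Node\"]}"
                + "]}";
            Schema schema = new Schema.Parser().parse(json);
            System.out.println(schema.toString(true));  // pretty-print the parsed schema
        }
    }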

------------------------------------------
[...truncated 37260 lines...]
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] Shutting down DataNode 2
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 10:32:01 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 10:32:01 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 10:32:01 INFO ipc.Server: Stopping server on 35022
    [junit] 12/08/20 10:32:01 INFO ipc.Server: IPC Server handler 0 on 35022: exiting
    [junit] 12/08/20 10:32:01 INFO ipc.Server: IPC Server handler 2 on 35022: exiting
    [junit] 12/08/20 10:32:01 INFO ipc.Server: IPC Server handler 1 on 35022: exiting
    [junit] 12/08/20 10:32:01 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:01 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 10:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/20 10:32:01 INFO ipc.Server: Stopping IPC Server listener on 35022
    [junit] 12/08/20 10:32:01 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:48401, storageID=DS-608203159-67.195.138.20-48401-1345458249171, infoPort=45503, ipcPort=35022):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 10:32:01 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 10:32:02 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:48401, storageID=DS-608203159-67.195.138.20-48401-1345458249171, infoPort=45503, ipcPort=35022):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/20 10:32:02 INFO ipc.Server: Stopping server on 35022
    [junit] 12/08/20 10:32:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:02 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 10:32:02 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 10:32:02 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1499804387
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1499804387
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 10:32:02 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 10:32:02 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 1
    [junit] 12/08/20 10:32:02 INFO ipc.Server: Stopping server on 39335
    [junit] 12/08/20 10:32:02 INFO ipc.Server: IPC Server handler 0 on 39335: exiting
    [junit] 12/08/20 10:32:02 INFO ipc.Server: IPC Server handler 1 on 39335: exiting
    [junit] 12/08/20 10:32:02 INFO ipc.Server: IPC Server handler 2 on 39335: exiting
    [junit] 12/08/20 10:32:02 INFO ipc.Server: Stopping IPC Server listener on 39335
    [junit] 12/08/20 10:32:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:02 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/20 10:32:02 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:44723, storageID=DS-535266378-67.195.138.20-44723-1345458248796, infoPort=39341, ipcPort=39335):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:44723, storageID=DS-535266378-67.195.138.20-44723-1345458248796, infoPort=39341, ipcPort=39335):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/20 10:32:02 INFO ipc.Server: Stopping server on 39335
    [junit] 12/08/20 10:32:02 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:02 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 10:32:02 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 10:32:02 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 10:32:03 INFO mapred.TaskTracker: Received 'KillJobAction' for job: job_20120820102409624_0012
    [junit] 12/08/20 10:32:03 WARN mapred.TaskTracker: Unknown job job_20120820102409624_0012 being deleted.
    [junit] 12/08/20 10:32:03 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:03 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1335200122
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1335200122
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 10:32:03 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/08/20 10:32:03 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping server on 46739
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 1 on 46739: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 2 on 46739: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 0 on 46739: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping IPC Server listener on 46739
    [junit] 12/08/20 10:32:04 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/20 10:32:04 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49338, storageID=DS-945192787-67.195.138.20-49338-1345458248418, infoPort=54284, ipcPort=46739):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/20 10:32:04 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/20 10:32:04 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:04 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/20 10:32:04 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49338, storageID=DS-945192787-67.195.138.20-49338-1345458248418, infoPort=54284, ipcPort=46739):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/20 10:32:04 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping server on 46739
    [junit] 12/08/20 10:32:04 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:04 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/20 10:32:04 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/20 10:32:04 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/20 10:32:04 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1598203225
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1598203225
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/20 10:32:04 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/20 10:32:04 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/20 10:32:04 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:54708 to delete  blk_-3542139909622922525_1078 blk_-8023402714853128380_1073
    [junit] 12/08/20 10:32:04 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:49338 to delete  blk_-3542139909622922525_1078
    [junit] 12/08/20 10:32:04 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/20 10:32:04 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/20 10:32:04 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 3 Number of transactions batched in Syncs: 153 Number of syncs: 289 SyncTimes(ms): 4551 229 
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping server on 42380
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 0 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 2 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 4 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 5 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 1 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 3 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 8 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 7 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 6 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: IPC Server handler 9 on 42380: exiting
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping IPC Server listener on 42380
    [junit] 12/08/20 10:32:04 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/20 10:32:04 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 469.079 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/20 10:32:05 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/20 10:32:05 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/20 10:32:05 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/20 10:32:05 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/20 10:32:05 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/20 10:32:05 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/20 10:32:05 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.227 sec
   [delete] Deleting directory /tmp/pig_junit_tmp630429645

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 14 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1299

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1299/changes>

Changes:

[thejas] PIG-2662: skew join does not honor its config parameters (rajesh.balamohan via thejas)
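
The config parameters at issue are the skewed-join tuning properties consulted when a script uses JOIN ... USING 'skewed'. The sketch below only shows where such properties would be set via PigServer; input paths and values are placeholders, and the property names (pig.skewedjoin.reduce.memusage, pig.skewedjoin.reduce.maxtuple) should be confirmed against the Pig release in use.

    // Minimal sketch: setting skewed-join tuning properties through PigServer before
    // a skewed join. Paths are placeholders; property names should be verified
    // against the Pig version being used.
    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    public class SkewedJoinConfigSketch {
        public static void main(String[] args) throws Exception {
            PigServer pig = new PigServer(ExecType.LOCAL);

            // Fraction of reducer memory the skewed-join partitioner may use,
            // and the per-key tuple cap used when splitting skewed keys.
            pig.getPigContext().getProperties()
               .setProperty("pig.skewedjoin.reduce.memusage", "0.3");
            pig.getPigContext().getProperties()
               .setProperty("pig.skewedjoin.reduce.maxtuple", "100000");

            pig.registerQuery("a = LOAD '/tmp/left'  AS (id:int, v:chararray);");
            pig.registerQuery("b = LOAD '/tmp/right' AS (id:int, w:chararray);");
            // USING 'skewed' selects the skew-join implementation these properties control.
            pig.registerQuery("j = JOIN a BY id, b BY id USING 'skewed';");
            pig.store("j", "/tmp/skewed_join_out");
        }
    }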

------------------------------------------
[...truncated 37421 lines...]
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/16 23:19:47 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/08/16 23:19:47 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/16 23:19:47 INFO ipc.Server: Stopping server on 43860
    [junit] 12/08/16 23:19:47 INFO ipc.Server: IPC Server handler 0 on 43860: exiting
    [junit] 12/08/16 23:19:47 INFO ipc.Server: IPC Server handler 2 on 43860: exiting
    [junit] 12/08/16 23:19:47 INFO ipc.Server: IPC Server handler 1 on 43860: exiting
    [junit] 12/08/16 23:19:47 INFO ipc.Server: Stopping IPC Server listener on 43860
    [junit] 12/08/16 23:19:47 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/16 23:19:47 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:47 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/16 23:19:47 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:47567, storageID=DS-1141731053-67.195.138.20-47567-1345158728251, infoPort=56642, ipcPort=43860):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/16 23:19:47 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/16 23:19:48 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:60158 to delete  blk_7472377375372200414_1078 blk_575971279202259882_1073
    [junit] 12/08/16 23:19:48 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:34278 to delete  blk_7472377375372200414_1078 blk_575971279202259882_1073
    [junit] 12/08/16 23:19:48 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/16 23:19:48 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:48 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:47567, storageID=DS-1141731053-67.195.138.20-47567-1345158728251, infoPort=56642, ipcPort=43860):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/16 23:19:48 INFO ipc.Server: Stopping server on 43860
    [junit] 12/08/16 23:19:48 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:48 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:48 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/16 23:19:48 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/16 23:19:48 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1125268935
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1125268935
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/16 23:19:48 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/16 23:19:48 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/16 23:19:48 INFO ipc.Server: Stopping server on 36673
    [junit] 12/08/16 23:19:48 INFO ipc.Server: IPC Server handler 0 on 36673: exiting
    [junit] 12/08/16 23:19:48 INFO ipc.Server: IPC Server handler 1 on 36673: exiting
    [junit] 12/08/16 23:19:48 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:48 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/16 23:19:48 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/16 23:19:48 INFO ipc.Server: IPC Server handler 2 on 36673: exiting
    [junit] 12/08/16 23:19:48 INFO ipc.Server: Stopping IPC Server listener on 36673
    [junit] 12/08/16 23:19:48 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:53100, storageID=DS-323581161-67.195.138.20-53100-1345158727871, infoPort=55796, ipcPort=36673):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/16 23:19:48 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/16 23:19:48 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Scheduling block blk_575971279202259882_1073 file build/test/data/dfs/data/data1/current/blk_575971279202259882 for deletion
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Scheduling block blk_7472377375372200414_1078 file build/test/data/dfs/data/data1/current/blk_7472377375372200414 for deletion
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Deleted block blk_575971279202259882_1073 at file build/test/data/dfs/data/data1/current/blk_575971279202259882
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Deleted block blk_7472377375372200414_1078 at file build/test/data/dfs/data/data1/current/blk_7472377375372200414
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:53100, storageID=DS-323581161-67.195.138.20-53100-1345158727871, infoPort=55796, ipcPort=36673):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/16 23:19:49 INFO ipc.Server: Stopping server on 36673
    [junit] 12/08/16 23:19:49 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:49 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/16 23:19:49 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/16 23:19:49 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1149228619
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1149228619
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/16 23:19:49 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/08/16 23:19:49 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/16 23:19:49 INFO ipc.Server: Stopping server on 39182
    [junit] 12/08/16 23:19:49 INFO ipc.Server: IPC Server handler 0 on 39182: exiting
    [junit] 12/08/16 23:19:49 INFO ipc.Server: IPC Server handler 2 on 39182: exiting
    [junit] 12/08/16 23:19:49 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/16 23:19:49 INFO ipc.Server: Stopping IPC Server listener on 39182
    [junit] 12/08/16 23:19:49 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:49 INFO ipc.Server: IPC Server handler 1 on 39182: exiting
    [junit] 12/08/16 23:19:49 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:34278, storageID=DS-299221205-67.195.138.20-34278-1345158727495, infoPort=57385, ipcPort=39182):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/16 23:19:49 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/16 23:19:50 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:51 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:53100 to delete  blk_7472377375372200414_1078 blk_575971279202259882_1073
    [junit] 12/08/16 23:19:52 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:34278, storageID=DS-299221205-67.195.138.20-34278-1345158727495, infoPort=57385, ipcPort=39182):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/16 23:19:52 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/16 23:19:52 INFO ipc.Server: Stopping server on 39182
    [junit] 12/08/16 23:19:52 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:52 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/16 23:19:54 INFO datanode.DataBlockScanner: Verification succeeded for blk_-7678536271973460431_1062
    [junit] 12/08/16 23:19:54 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/16 23:19:54 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/16 23:19:54 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/16 23:19:54 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-169571750
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-169571750
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/16 23:19:54 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/16 23:19:54 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/16 23:19:54 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/16 23:19:54 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/16 23:19:54 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 4Number of transactions batched in Syncs: 144 Number of syncs: 289 SyncTimes(ms): 3431 275 
    [junit] 12/08/16 23:19:54 INFO ipc.Server: Stopping server on 60433
    [junit] 12/08/16 23:19:54 INFO ipc.Server: Stopping IPC Server listener on 60433
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 0 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 7 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 9 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 1 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 2 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 3 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 4 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 5 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 8 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO ipc.Server: IPC Server handler 6 on 60433: exiting
    [junit] 12/08/16 23:19:54 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/16 23:19:54 INFO ipc.Server: Stopping IPC Server Responder
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 460.161 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
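
(Editorial aside: the repeated util.MBeans warnings in the TestStore teardown above come from unregistering JMX beans by ObjectName when nothing is registered under that exact name any more; the MBean server reports this as InstanceNotFoundException, which Hadoop's MBeans.unregister() appears to log and then ignore, so the shutdown continues. A short sketch of the same behaviour using only the standard javax.management API; the ObjectName below is hypothetical, chosen to match the style of the names in the log:)

import java.lang.management.ManagementFactory;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class UnregisterDemo {
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Hypothetical name, never actually registered in this sketch.
        ObjectName name = new ObjectName("Hadoop:service=DataNode,name=FSDatasetState-Demo");
        try {
            mbs.unregisterMBean(name);   // nothing exists under this name
        } catch (InstanceNotFoundException e) {
            // The WARN lines above print exactly this exception during cluster shutdown.
            System.out.println("unregister of missing bean: " + e);
        }
    }
}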
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/16 23:19:55 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/16 23:19:55 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/16 23:19:55 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/16 23:19:55 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/16 23:19:55 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/16 23:19:55 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/16 23:19:55 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.226 sec
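
(Editorial aside: the TestStringUDFs warnings above are expected; the tests feed SUBSTRING indices outside the input string, and the underlying java.lang.String.substring() call rejects them with StringIndexOutOfBoundsException, which the UDF only logs as a warning, so the run still shows 0 failures. A standalone sketch of the plain-Java behaviour the messages refer to, with no Pig classes; note the exact exception message format varies across JDK versions, and the "-2" style seen above matches the JDK 6 era used by this build:)

public class SubstringRangeDemo {
    public static void main(String[] args) {
        String s = "pig";
        // substring(begin, end) throws when end < begin or an index is out of bounds.
        try {
            System.out.println(s.substring(2, 0));   // end < begin -> "index out of range: -2" on older JDKs
        } catch (StringIndexOutOfBoundsException e) {
            System.out.println("expected: " + e.getMessage());
        }
        try {
            System.out.println(s.substring(0, 10));  // end past the string -> out of range
        } catch (StringIndexOutOfBoundsException e) {
            System.out.println("expected: " + e.getMessage());
        }
    }
}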
   [delete] Deleting directory /tmp/pig_junit_tmp1642160311

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 20 minutes 8 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1298

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1298/changes>

Changes:

[jcoveney] PIG-2876: Bump up Xerces version (jcoveney)

[billgraham] PIG-2871: Refactor signature for PigReducerEstimator (billgraham)

[billgraham] PIG-2866: PigServer fails with macros without a script file (billgraham)

------------------------------------------
[...truncated 6468 lines...]
 [findbugs]   org.apache.hadoop.util.RunJar
 [findbugs]   org.jruby.RubyBoolean
 [findbugs]   org.apache.hadoop.mapred.Counters$Group
 [findbugs]   com.jcraft.jsch.ChannelExec
 [findbugs]   org.apache.hadoop.hbase.util.Base64
 [findbugs]   org.antlr.runtime.TokenStream
 [findbugs]   org.apache.hadoop.io.IOUtils
 [findbugs]   org.jruby.RubyBignum
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 39
 [findbugs] Missing classes: 272
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl
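
(Editorial aside: the FindBugs wrapper above builds its exit code as a bitwise OR of status flags, 1 for "bugs found" and 2 for "missing class", hence "Exit code set to: 3". A tiny sketch of that flag arithmetic; the constant names are illustrative, not quoted from the FindBugs source:)

public class ExitCodeDemo {
    // Illustrative flag values matching the log: "bugs found" = 1, "missing class" = 2.
    static final int BUGS_FOUND_FLAG = 1;
    static final int MISSING_CLASS_FLAG = 2;

    public static void main(String[] args) {
        int exitCode = 0;
        boolean bugsFound = true;       // 39 warnings generated
        boolean missingClasses = true;  // 272 missing classes

        if (missingClasses) exitCode |= MISSING_CLASS_FLAG;
        if (bugsFound)      exitCode |= BUGS_FOUND_FLAG;

        System.out.println("Exit code set to: " + exitCode);  // prints 3
    }
}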

BUILD SUCCESSFUL
Total time: 5 minutes 12 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 0 seconds
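
(Editorial aside: the Clover failure above is a licensing check rather than a compile or test problem; the license on the slave only covers Clover releases built up to December 14 2010, while the installed clover.jar was built May 31 2011, so clover-setup aborts before any instrumentation happens. A hedged sketch of that kind of date comparison, using the dates quoted in the message; the class and field names are hypothetical and not Atlassian's code:)

import java.time.LocalDate;

public class LicenseCheckDemo {
    public static void main(String[] args) {
        // Dates quoted in the error message above.
        LocalDate upgradesCoveredUntil = LocalDate.of(2010, 12, 14);
        LocalDate toolBuildDate = LocalDate.of(2011, 5, 31);

        if (toolBuildDate.isAfter(upgradesCoveredUntil)) {
            // Mirrors the effect of CloverStartup.loadLicense() rejecting an out-of-date license.
            throw new RuntimeException("Clover upgrades for your license ended "
                    + upgradesCoveredUntil + ", and this version of Clover was built "
                    + toolBuildDate + ". Renew the license or pin an older Clover version.");
        }
    }
}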
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1297

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1297/changes>

Changes:

[jcoveney] properly commit PIG-2862 (jcoveney)

------------------------------------------
[...truncated 37617 lines...]
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/10 10:35:16 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 2
    [junit] 12/08/10 10:35:16 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/10 10:35:16 INFO ipc.Server: Stopping server on 57696
    [junit] 12/08/10 10:35:16 INFO ipc.Server: IPC Server handler 0 on 57696: exiting
    [junit] 12/08/10 10:35:16 INFO ipc.Server: IPC Server handler 2 on 57696: exiting
    [junit] 12/08/10 10:35:16 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/10 10:35:16 INFO ipc.Server: Stopping IPC Server listener on 57696
    [junit] 12/08/10 10:35:16 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:16 INFO ipc.Server: IPC Server handler 1 on 57696: exiting
    [junit] 12/08/10 10:35:16 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/10 10:35:16 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:41581, storageID=DS-785319777-67.195.138.20-41581-1344594408543, infoPort=40539, ipcPort=57696):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/10 10:35:16 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/10 10:35:16 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:37837 to delete  blk_-7997423437452859717_1078
    [junit] 12/08/10 10:35:16 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:48038 to delete  blk_-4204589046545666147_1073 blk_-7997423437452859717_1078
    [junit] 12/08/10 10:35:16 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:41581, storageID=DS-785319777-67.195.138.20-41581-1344594408543, infoPort=40539, ipcPort=57696):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/08/10 10:35:17 INFO ipc.Server: Stopping server on 57696
    [junit] 12/08/10 10:35:17 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:17 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/10 10:35:17 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/10 10:35:17 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1936845211
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1936845211
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] Shutting down DataNode 1
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/10 10:35:17 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/10 10:35:17 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/10 10:35:17 INFO ipc.Server: Stopping server on 33558
    [junit] 12/08/10 10:35:17 INFO ipc.Server: IPC Server handler 0 on 33558: exiting
    [junit] 12/08/10 10:35:17 INFO ipc.Server: IPC Server handler 2 on 33558: exiting
    [junit] 12/08/10 10:35:17 INFO ipc.Server: Stopping IPC Server listener on 33558
    [junit] 12/08/10 10:35:17 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:17 INFO ipc.Server: IPC Server handler 1 on 33558: exiting
    [junit] 12/08/10 10:35:17 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/10 10:35:17 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:60683, storageID=DS-1605997614-67.195.138.20-60683-1344594408160, infoPort=40738, ipcPort=33558):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/10 10:35:17 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Scheduling block blk_-7997423437452859717_1078 file build/test/data/dfs/data/data1/current/blk_-7997423437452859717 for deletion
    [junit] 12/08/10 10:35:17 INFO datanode.DataNode: Deleted block blk_-7997423437452859717_1078 at file build/test/data/dfs/data/data1/current/blk_-7997423437452859717
    [junit] 12/08/10 10:35:18 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:18 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:60683, storageID=DS-1605997614-67.195.138.20-60683-1344594408160, infoPort=40738, ipcPort=33558):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/08/10 10:35:18 INFO ipc.Server: Stopping server on 33558
    [junit] 12/08/10 10:35:18 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:18 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:18 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/10 10:35:18 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/10 10:35:18 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-970643974
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-970643974
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/10 10:35:18 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/10 10:35:18 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] Shutting down DataNode 0
    [junit] 12/08/10 10:35:18 INFO ipc.Server: Stopping server on 47195
    [junit] 12/08/10 10:35:18 INFO ipc.Server: Stopping IPC Server listener on 47195
    [junit] 12/08/10 10:35:18 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/10 10:35:18 INFO ipc.Server: IPC Server handler 2 on 47195: exiting
    [junit] 12/08/10 10:35:18 INFO ipc.Server: IPC Server handler 1 on 47195: exiting
    [junit] 12/08/10 10:35:18 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:18 INFO ipc.Server: IPC Server handler 0 on 47195: exiting
    [junit] 12/08/10 10:35:18 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/08/10 10:35:18 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:37837, storageID=DS-457814254-67.195.138.20-37837-1344594407734, infoPort=44602, ipcPort=47195):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/08/10 10:35:18 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/08/10 10:35:18 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/08/10 10:35:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:19 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:37837, storageID=DS-457814254-67.195.138.20-37837-1344594407734, infoPort=44602, ipcPort=47195):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/08/10 10:35:19 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/10 10:35:19 INFO ipc.Server: Stopping server on 47195
    [junit] 12/08/10 10:35:19 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/08/10 10:35:19 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/08/10 10:35:19 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/08/10 10:35:19 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1665193613
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1665193613
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:129)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/08/10 10:35:19 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/08/10 10:35:19 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/08/10 10:35:19 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/08/10 10:35:19 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/08/10 10:35:19 INFO namenode.FSNamesystem: Number of transactions: 382 Total time for transactions(ms): 8Number of transactions batched in Syncs: 153 Number of syncs: 287 SyncTimes(ms): 2585 218 
    [junit] 12/08/10 10:35:19 INFO ipc.Server: Stopping server on 57145
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 0 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 2 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 3 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 1 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 4 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 5 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 6 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/08/10 10:35:19 INFO ipc.Server: Stopping IPC Server listener on 57145
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 9 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 8 on 57145: exiting
    [junit] 12/08/10 10:35:19 INFO ipc.Server: IPC Server handler 7 on 57145: exiting
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 505.189 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/08/10 10:35:20 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/08/10 10:35:20 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/10 10:35:20 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/08/10 10:35:20 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/08/10 10:35:20 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/08/10 10:35:20 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/08/10 10:35:20 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.105 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1167106095

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:793: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:861: Tests failed!

Total time: 23 minutes 10 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints

Build failed in Jenkins: Pig-trunk #1296

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1296/changes>

Changes:

[billgraham] PIG-2851: Add flag to ant to run tests with a debugger port (billgraham)

[jcoveney] fix CHANGES from PIG-2860

[jcoveney] PIG-2860: TestAvroStorageUtils.testGetConcretePathFromGlob fails on some version of hadoop (cheolsoo via jcoveney)

------------------------------------------
[...truncated 6475 lines...]
 [findbugs]   org.apache.hadoop.util.RunJar
 [findbugs]   org.jruby.RubyBoolean
 [findbugs]   org.apache.hadoop.mapred.Counters$Group
 [findbugs]   com.jcraft.jsch.ChannelExec
 [findbugs]   org.apache.hadoop.hbase.util.Base64
 [findbugs]   org.antlr.runtime.TokenStream
 [findbugs]   org.apache.hadoop.io.IOUtils
 [findbugs]   org.jruby.RubyBignum
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.jruby.embed.internal.LocalContextProvider
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   groovy.lang.Tuple
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.jruby.runtime.builtin.IRubyObject
 [findbugs]   org.jruby.RubyInteger
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.mortbay.log.Log
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.jruby.runtime.Block
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   com.google.common.collect.Maps
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.jruby.RubyNil
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.jruby.anno.JRubyMethod
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.jruby.embed.LocalContextScope
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.jruby.RubySymbol
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.jruby.RubyString
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.jruby.RubyFixnum
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   groovy.util.ResourceException
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.jruby.embed.ScriptingContainer
 [findbugs]   org.jruby.Ruby
 [findbugs]   groovy.util.GroovyScriptEngine
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.google.common.io.Files
 [findbugs]   groovy.util.ScriptException
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.fusesource.jansi.AnsiConsole
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 39
 [findbugs] Missing classes: 272
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 5 minutes 27 seconds
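
The findbugs exit code of 3 in the run above is just the bitwise OR of the two status flags the task reports: 1 for "bugs found" (39 warnings generated) and 2 for "missing class" (272 classes absent from the analysis classpath). A minimal Java sketch of that combination, using illustrative names rather than the real FindBugs API:

    // Sketch only: field and method names here are made up for illustration;
    // the flag values (1 = bugs found, 2 = missing classes) are taken from the log above.
    public class FindbugsExitCodeSketch {
        static final int BUGS_FOUND_FLAG = 1;
        static final int MISSING_CLASS_FLAG = 2;
        static final int ERROR_FLAG = 4;

        static int exitCode(int warnings, int missingClasses, boolean analysisError) {
            int code = 0;
            if (warnings > 0)       code |= BUGS_FOUND_FLAG;    // "Setting 'bugs found' flag (1)"
            if (missingClasses > 0) code |= MISSING_CLASS_FLAG; // "Setting 'missing class' flag (2)"
            if (analysisError)      code |= ERROR_FLAG;
            return code;
        }

        public static void main(String[] args) {
            // 39 warnings and 272 missing classes, as in this build: 1 | 2 = 3
            System.out.println(exitCode(39, 272, false)); // prints 3, matching "Exit code set to: 3"
        }
    }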


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
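
The Clover failure above appears to be a license-age check: the license allows upgrades only through December 14 2010, while the bundled Clover 3.1.0 was built May 31 2011, so clover-setup aborts before any instrumentation happens. A minimal sketch of that kind of comparison, assuming it is a plain date check; the class and method below are made up and are not Atlassian's actual CloverStartup code:

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Locale;

    // Hypothetical illustration of the check implied by the error message above.
    public class CloverLicenseCheckSketch {
        public static void main(String[] args) throws Exception {
            SimpleDateFormat fmt = new SimpleDateFormat("MMMM d yyyy", Locale.ENGLISH);
            Date upgradesEnd = fmt.parse("December 14 2010"); // from the error message
            Date cloverBuilt = fmt.parse("May 31 2011");      // Clover 3.1.0 build date

            if (cloverBuilt.after(upgradesEnd)) {
                // Mirrors the RuntimeException thrown from CloverStartup.loadLicense in the log
                throw new RuntimeException("Clover upgrades for your license ended "
                        + fmt.format(upgradesEnd) + ", and this version of Clover was built "
                        + fmt.format(cloverBuilt) + ".");
            }
        }
    }

Presumably the ANALYSIS step will keep failing at clover.setup, and marking the build as a failure, until the Clover license is renewed or the -Drun.clover=true flag is dropped from that invocation.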
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints