Posted to dev@pig.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2012/01/17 23:41:47 UTC

Build failed in Jenkins: Pig-trunk #1174

See <https://builds.apache.org/job/Pig-trunk/1174/changes>

Changes:

[dvryaboy] PIG-2359: Support more efficient Tuples when schemas are known (part 2)

------------------------------------------
[...truncated 37037 lines...]
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/01/17 22:33:18 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/01/17 22:33:18 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping server on 56123
    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 0 on 56123: exiting
    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping IPC Server listener on 56123
    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 2 on 56123: exiting
    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/01/17 22:33:18 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 1 on 56123: exiting
    [junit] 12/01/17 22:33:18 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/01/17 22:33:18 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:47324, storageID=DS-1849659132-67.195.138.20-47324-1326839095771, infoPort=37487, ipcPort=56123):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/01/17 22:33:18 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-5914952160057491263_1095 file build/test/data/dfs/data/data2/current/blk_-5914952160057491263 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-3299861067875593375_1102 file build/test/data/dfs/data/data2/current/blk_-3299861067875593375 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-2378485608018695930_1101 file build/test/data/dfs/data/data2/current/blk_-2378485608018695930 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-5914952160057491263_1095 at file build/test/data/dfs/data/data2/current/blk_-5914952160057491263
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_1266225547534690160_1102 file build/test/data/dfs/data/data1/current/blk_1266225547534690160 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-3299861067875593375_1102 at file build/test/data/dfs/data/data2/current/blk_-3299861067875593375
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_1266225547534690160_1102 at file build/test/data/dfs/data/data1/current/blk_1266225547534690160
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-2378485608018695930_1101 at file build/test/data/dfs/data/data2/current/blk_-2378485608018695930
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-5914952160057491263_1095 file build/test/data/dfs/data/data3/current/blk_-5914952160057491263 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-3299861067875593375_1102 file build/test/data/dfs/data/data4/current/blk_-3299861067875593375 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-5914952160057491263_1095 at file build/test/data/dfs/data/data3/current/blk_-5914952160057491263
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_-2378485608018695930_1101 file build/test/data/dfs/data/data4/current/blk_-2378485608018695930 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-3299861067875593375_1102 at file build/test/data/dfs/data/data4/current/blk_-3299861067875593375
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block blk_1266225547534690160_1102 file build/test/data/dfs/data/data3/current/blk_1266225547534690160 for deletion
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_-2378485608018695930_1101 at file build/test/data/dfs/data/data4/current/blk_-2378485608018695930
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block blk_1266225547534690160_1102 at file build/test/data/dfs/data/data3/current/blk_1266225547534690160
    [junit] 12/01/17 22:33:19 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:47324, storageID=DS-1849659132-67.195.138.20-47324-1326839095771, infoPort=37487, ipcPort=56123):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 56123
    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/01/17 22:33:19 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1509900428
    [junit] Shutting down DataNode 1
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1509900428
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/01/17 22:33:19 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/01/17 22:33:19 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 40885
    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 0 on 40885: exiting
    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping IPC Server listener on 40885
    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 1 on 40885: exiting
    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/01/17 22:33:19 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:45088, storageID=DS-1863537504-67.195.138.20-45088-1326839095442, infoPort=49950, ipcPort=40885):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 2 on 40885: exiting
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/01/17 22:33:19 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:45088, storageID=DS-1863537504-67.195.138.20-45088-1326839095442, infoPort=49950, ipcPort=40885):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 40885
    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/01/17 22:33:19 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1895231096
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1895231096
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/01/17 22:33:19 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] Shutting down DataNode 0
    [junit] 12/01/17 22:33:19 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping server on 41896
    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 0 on 41896: exiting
    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 1 on 41896: exiting
    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 2 on 41896: exiting
    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping IPC Server listener on 41896
    [junit] 12/01/17 22:33:20 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:20 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
    [junit] 12/01/17 22:33:20 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49647, storageID=DS-578389708-67.195.138.20-49647-1326839095094, infoPort=59242, ipcPort=41896):DataXceiveServer:java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 12/01/17 22:33:20 INFO datanode.DataNode: Exiting DataXceiveServer
    [junit] 12/01/17 22:33:20 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
    [junit] 12/01/17 22:33:20 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:59660 to delete  blk_-5914952160057491263_1095
    [junit] 12/01/17 22:33:20 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:47324 to delete  blk_-2378485608018695930_1101 blk_-3299861067875593375_1102 blk_1266225547534690160_1102
    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49647, storageID=DS-578389708-67.195.138.20-49647-1326839095094, infoPort=59242, ipcPort=41896):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
    [junit] 12/01/17 22:33:21 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping server on 41896
    [junit] 12/01/17 22:33:21 INFO metrics.RpcInstrumentation: shut down
    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
    [junit] 12/01/17 22:33:21 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
    [junit] 12/01/17 22:33:21 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
    [junit] 12/01/17 22:33:21 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-982927745
    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-982927745
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
    [junit] 	at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
    [junit] 	at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
    [junit] 	at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
    [junit] 	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
    [junit] 	at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
    [junit] 	at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    [junit] 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
    [junit] 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    [junit] 	at java.lang.reflect.Method.invoke(Method.java:597)
    [junit] 	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
    [junit] 	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
    [junit] 	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
    [junit] 	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
    [junit] 	at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
    [junit] 	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    [junit] 	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
    [junit] 12/01/17 22:33:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
    [junit] 12/01/17 22:33:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
    [junit] 12/01/17 22:33:21 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 12/01/17 22:33:21 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 10 Number of transactions batched in Syncs: 165 Number of syncs: 349 SyncTimes(ms): 3426 280 
    [junit] 12/01/17 22:33:21 INFO namenode.DecommissionManager: Interrupted Monitor
    [junit] java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping server on 36202
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 0 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 2 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 1 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 3 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 4 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 5 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping IPC Server listener on 36202
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 6 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 7 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 9 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 8 on 36202: exiting
    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping IPC Server Responder
    [junit] 12/01/17 22:33:21 INFO metrics.RpcInstrumentation: shut down
    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 499.771 sec
    [junit] Test org.apache.pig.test.TestStore FAILED
    [junit] Running org.apache.pig.test.TestStringUDFs
    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
    [junit] 12/01/17 22:33:22 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
    [junit] 12/01/17 22:33:22 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.102 sec
   [delete] Deleting directory /tmp/pig_junit_tmp1053430922

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:774: The following error occurred while executing this line:
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:831: Tests failed!

Total time: 22 minutes 12 seconds
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure


Build failed in Jenkins: Pig-trunk #1175

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/Pig-trunk/1175/changes>

Changes:

[daijy] PIG-2347: Fix Pig Unit tests for hadoop 23 (PIG-2347-4.patch)

[daijy] PIG-2477: TestBuiltin testLFText/testSFPig failing against 23 due to invalid test setup -- InvalidInputException

[daijy] PIG-2477: TestBuiltin testLFText/testSFPig failing against 23 due to invalid test setup -- InvalidInputException

------------------------------------------
[...truncated 6984 lines...]
 [findbugs]   jline.ConsoleReaderInputStream
 [findbugs]   org.apache.log4j.PropertyConfigurator
 [findbugs]   org.apache.hadoop.mapred.TaskID
 [findbugs]   org.apache.commons.cli.CommandLine
 [findbugs]   org.python.core.Py
 [findbugs]   org.apache.hadoop.io.BooleanWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable
 [findbugs]   org.antlr.runtime.BitSet
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.Job
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter$CompareOp
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader
 [findbugs]   org.mozilla.javascript.NativeFunction
 [findbugs]   org.apache.hadoop.mapreduce.Counter
 [findbugs]   org.codehaus.jackson.JsonEncoding
 [findbugs]   org.codehaus.jackson.JsonParseException
 [findbugs]   org.python.core.PyCode
 [findbugs]   com.jcraft.jsch.HostKey
 [findbugs]   org.apache.hadoop.hbase.filter.Filter
 [findbugs]   org.apache.commons.logging.Log
 [findbugs]   com.google.common.util.concurrent.ListenableFuture
 [findbugs]   org.apache.hadoop.util.RunJar
 [findbugs]   org.apache.hadoop.mapred.Counters$Group
 [findbugs]   com.jcraft.jsch.ChannelExec
 [findbugs]   org.apache.hadoop.hbase.util.Base64
 [findbugs]   org.antlr.runtime.TokenStream
 [findbugs]   com.google.common.util.concurrent.CheckedFuture
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Reader$Scanner$Entry
 [findbugs]   org.apache.hadoop.fs.FSDataInputStream
 [findbugs]   org.python.core.PyObject
 [findbugs]   jline.History
 [findbugs]   org.apache.hadoop.io.BooleanWritable
 [findbugs]   org.apache.log4j.Logger
 [findbugs]   org.apache.hadoop.hbase.filter.FamilyFilter
 [findbugs]   org.antlr.runtime.IntStream
 [findbugs]   org.apache.hadoop.util.ReflectionUtils
 [findbugs]   org.apache.hadoop.fs.ContentSummary
 [findbugs]   org.python.core.PyTuple
 [findbugs]   org.apache.hadoop.conf.Configuration
 [findbugs]   com.google.common.base.Joiner
 [findbugs]   org.apache.hadoop.mapreduce.lib.input.FileSplit
 [findbugs]   org.apache.hadoop.mapred.Counters$Counter
 [findbugs]   com.jcraft.jsch.Channel
 [findbugs]   org.apache.hadoop.mapred.JobPriority
 [findbugs]   org.apache.commons.cli.Options
 [findbugs]   org.apache.hadoop.mapred.JobID
 [findbugs]   org.apache.hadoop.util.bloom.BloomFilter
 [findbugs]   org.python.core.PyFrame
 [findbugs]   org.apache.hadoop.hbase.filter.CompareFilter
 [findbugs]   org.apache.hadoop.util.VersionInfo
 [findbugs]   org.python.core.PyString
 [findbugs]   org.apache.hadoop.io.Text$Comparator
 [findbugs]   org.antlr.runtime.MismatchedSetException
 [findbugs]   org.apache.hadoop.io.BytesWritable
 [findbugs]   org.apache.hadoop.fs.FsShell
 [findbugs]   org.mozilla.javascript.ImporterTopLevel
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableOutputFormat
 [findbugs]   org.apache.hadoop.mapred.TaskReport
 [findbugs]   org.antlr.runtime.tree.RewriteRuleSubtreeStream
 [findbugs]   org.apache.commons.cli.HelpFormatter
 [findbugs]   org.mozilla.javascript.NativeObject
 [findbugs]   org.apache.hadoop.hbase.HConstants
 [findbugs]   org.apache.hadoop.io.serializer.Deserializer
 [findbugs]   org.antlr.runtime.FailedPredicateException
 [findbugs]   org.apache.hadoop.io.compress.CompressionCodec
 [findbugs]   org.apache.hadoop.fs.FileStatus
 [findbugs]   org.apache.hadoop.hbase.client.Result
 [findbugs]   org.apache.hadoop.mapreduce.JobContext
 [findbugs]   org.codehaus.jackson.JsonGenerator
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptContext
 [findbugs]   org.apache.hadoop.io.BytesWritable$Comparator
 [findbugs]   org.apache.hadoop.io.LongWritable$Comparator
 [findbugs]   org.codehaus.jackson.map.util.LRUMap
 [findbugs]   org.apache.hadoop.hbase.util.Bytes
 [findbugs]   org.antlr.runtime.MismatchedTokenException
 [findbugs]   org.codehaus.jackson.JsonParser
 [findbugs]   com.jcraft.jsch.UserInfo
 [findbugs]   org.python.core.PyException
 [findbugs]   org.apache.commons.cli.ParseException
 [findbugs]   org.apache.hadoop.io.compress.CompressionOutputStream
 [findbugs]   org.apache.hadoop.hbase.filter.WritableByteArrayComparable
 [findbugs]   org.antlr.runtime.tree.CommonTreeNodeStream
 [findbugs]   org.apache.log4j.Level
 [findbugs]   org.apache.hadoop.hbase.client.Scan
 [findbugs]   org.apache.hadoop.mapreduce.Job
 [findbugs]   com.google.common.util.concurrent.Futures
 [findbugs]   org.apache.commons.logging.LogFactory
 [findbugs]   org.apache.commons.codec.binary.Base64
 [findbugs]   org.codehaus.jackson.map.ObjectMapper
 [findbugs]   org.apache.hadoop.fs.FileSystem
 [findbugs]   org.apache.hadoop.hbase.filter.FilterList$Operator
 [findbugs]   org.apache.hadoop.hbase.io.ImmutableBytesWritable
 [findbugs]   org.apache.hadoop.io.serializer.SerializationFactory
 [findbugs]   org.antlr.runtime.tree.TreeAdaptor
 [findbugs]   org.apache.hadoop.mapred.RunningJob
 [findbugs]   org.antlr.runtime.CommonTokenStream
 [findbugs]   org.apache.hadoop.io.DataInputBuffer
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile
 [findbugs]   org.apache.commons.cli.GnuParser
 [findbugs]   org.mozilla.javascript.Context
 [findbugs]   org.apache.hadoop.io.FloatWritable
 [findbugs]   org.antlr.runtime.tree.RewriteEarlyExitException
 [findbugs]   org.apache.hadoop.hbase.HBaseConfiguration
 [findbugs]   org.codehaus.jackson.JsonGenerationException
 [findbugs]   org.apache.hadoop.mapreduce.TaskInputOutputContext
 [findbugs]   org.apache.hadoop.io.compress.GzipCodec
 [findbugs]   org.apache.hadoop.mapred.jobcontrol.JobControl
 [findbugs]   org.antlr.runtime.BaseRecognizer
 [findbugs]   org.apache.hadoop.fs.FileUtil
 [findbugs]   org.apache.hadoop.fs.Path
 [findbugs]   org.apache.hadoop.hbase.client.Put
 [findbugs]   org.apache.hadoop.io.file.tfile.TFile$Writer
 [findbugs]   jline.ConsoleReader
 [findbugs]   com.google.common.collect.Lists
 [findbugs]   org.apache.hadoop.mapreduce.MapContext
 [findbugs]   org.python.core.PyJavaPackage
 [findbugs]   org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 [findbugs]   org.python.core.PyStringMap
 [findbugs]   org.apache.hadoop.mapreduce.TaskID
 [findbugs]   org.apache.hadoop.hbase.client.HTable
 [findbugs]   org.apache.hadoop.io.FloatWritable$Comparator
 [findbugs]   org.apache.zookeeper.ZooKeeper
 [findbugs]   org.codehaus.jackson.map.JsonMappingException
 [findbugs]   org.python.core.PyFunction
 [findbugs]   org.antlr.runtime.TokenSource
 [findbugs]   com.jcraft.jsch.ChannelDirectTCPIP
 [findbugs]   com.jcraft.jsch.JSchException
 [findbugs]   org.python.util.PythonInterpreter
 [findbugs]   org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 [findbugs]   org.python.core.PyInteger
 [findbugs]   org.apache.hadoop.mapred.JobConf
 [findbugs]   org.apache.hadoop.util.bloom.Key
 [findbugs]   org.apache.hadoop.io.Text
 [findbugs]   org.antlr.runtime.NoViableAltException
 [findbugs]   org.apache.hadoop.util.GenericOptionsParser
 [findbugs]   org.apache.hadoop.mapreduce.JobID
 [findbugs]   org.apache.hadoop.mapreduce.TaskAttemptID
 [findbugs]   org.apache.hadoop.filecache.DistributedCache
 [findbugs]   org.apache.hadoop.fs.FSDataOutputStream
 [findbugs]   org.python.core.PyList
 [findbugs]   org.antlr.runtime.tree.TreeNodeStream
 [findbugs]   org.apache.hadoop.hbase.filter.BinaryComparator
 [findbugs]   dk.brics.automaton.RegExp
 [findbugs]   org.mozilla.javascript.Scriptable
 [findbugs]   org.mozilla.javascript.EcmaError
 [findbugs]   org.apache.hadoop.io.serializer.Serializer
 [findbugs]   org.apache.hadoop.util.bloom.Filter
 [findbugs]   org.python.core.PyNone
 [findbugs]   org.mozilla.javascript.Function
 [findbugs]   org.python.core.PySystemState
 [findbugs]   org.antlr.runtime.RecognizerSharedState
 [findbugs]   org.codehaus.jackson.JsonFactory
 [findbugs]   org.antlr.runtime.EarlyExitException
 [findbugs]   org.apache.hadoop.hdfs.DistributedFileSystem
 [findbugs]   org.apache.hadoop.util.LineReader
 [findbugs] Warnings generated: 23
 [findbugs] Missing classes: 231
 [findbugs] Calculating exit code...
 [findbugs] Setting 'missing class' flag (2)
 [findbugs] Setting 'bugs found' flag (1)
 [findbugs] Exit code set to: 3
 [findbugs] Java Result: 3
 [findbugs] Classes needed for analysis were missing
 [findbugs] Output saved to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml>
     [xslt] Processing <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.xml> to <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/findbugs/pig-findbugs-report.html>
     [xslt] Loading stylesheet /home/jenkins/tools/findbugs/latest/src/xsl/default.xsl

BUILD SUCCESSFUL
Total time: 8 minutes 20 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================




======================================================================
======================================================================
CLEAN: cleaning workspace
======================================================================
======================================================================


Buildfile: build.xml

clean:
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src-gen>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/src/docs/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/build>
   [delete] Deleting directory <https://builds.apache.org/job/Pig-trunk/ws/trunk/test/org/apache/pig/test/utils/dotGraph/parser>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig.jar>
   [delete] Deleting: <https://builds.apache.org/job/Pig-trunk/ws/trunk/pig-withouthadoop.jar>

clean:

clean:

BUILD SUCCESSFUL
Total time: 0 seconds


======================================================================
======================================================================
ANALYSIS: ant -Drun.clover=true -Dclover.home=/homes/hudson/tools/clover/latest clover test-commit generate-clover-reports -Dtest.junit.output.format=xml -Dtest.output=yes -Dversion=${BUILD_ID} -Dfindbugs.home=$FINDBUGS_HOME -Djava5.home=$JAVA5_HOME -Dforrest.home=$FORREST_HOME -Dclover.home=$CLOVER_HOME -Declipse.home=$ECLIPSE_HOME
======================================================================
======================================================================


Buildfile: build.xml

clover.setup:
    [mkdir] Created dir: <https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/clover/db>
[clover-setup] Clover Version 3.1.0, built on May 31 2011 (build-821)
[clover-setup] Loaded from: /home/jenkins/tools/clover/latest/lib/clover.jar

BUILD FAILED
java.lang.RuntimeException: Clover upgrades for your license ended December 14 2010, and this version of Clover was built May 31 2011. Please visit http://www.atlassian.com/clover/renew for information on upgrading your license.
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:103)
	at com.cenqua.clover.CloverStartup.loadLicense(CloverStartup.java:25)
	at com.cenqua.clover.tasks.AbstractCloverTask.execute(AbstractCloverTask.java:52)
	at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:288)
	at sun.reflect.GeneratedMethodAccessor1.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
	at org.apache.tools.ant.Task.perform(Task.java:348)
	at org.apache.tools.ant.Target.execute(Target.java:357)
	at org.apache.tools.ant.Target.performTasks(Target.java:385)
	at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1337)
	at org.apache.tools.ant.Project.executeTarget(Project.java:1306)
	at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
	at org.apache.tools.ant.Project.executeTargets(Project.java:1189)
	at org.apache.tools.ant.Main.runBuild(Main.java:758)
	at org.apache.tools.ant.Main.startAnt(Main.java:217)
	at org.apache.tools.ant.launch.Launcher.run(Launcher.java:257)
	at org.apache.tools.ant.launch.Launcher.main(Launcher.java:104)

Total time: 1 second
Build step 'Execute shell' marked build as failure
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure


Re: Build failed in Jenkins: Pig-trunk #1174

Posted by Dmitriy Ryaboy <dv...@gmail.com>.
Hm. I just tried TestStore and it passed from a clean checkout on my
machine.

Looking at the failing test in Jenkins, it's got stack traces like this one
below. Anyone know what could be the cause?

12/01/17 22:33:13 INFO mapred.TaskInProgress: Error from
attempt_20120117222456151_0012_m_000001_0:
java.lang.NumberFormatException: For input string:
"18446743988250694508"
	at java.lang.NumberFormatException.forInputString(NumberFormatException.java:48)
	at java.lang.Long.parseLong(Long.java:422)
	at java.lang.Long.parseLong(Long.java:468)
	at org.apache.hadoop.util.ProcfsBasedProcessTree.constructProcessInfo(ProcfsBasedProcessTree.java:413)
	at org.apache.hadoop.util.ProcfsBasedProcessTree.getProcessTree(ProcfsBasedProcessTree.java:148)
	at org.apache.hadoop.util.LinuxResourceCalculatorPlugin.getProcResourceValues(LinuxResourceCalculatorPlugin.java:401)
	at org.apache.hadoop.mapred.Task.initialize(Task.java:536)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:353)
	at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1083)
	at org.apache.hadoop.mapred.Child.main(Child.java:249)

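For what it's worth, the input string in that trace,
"18446743988250694508", is larger than Long.MAX_VALUE
(9223372036854775807), so Long.parseLong is guaranteed to reject it.
Below is a minimal, self-contained sketch of that failure mode; it is
illustrative only and is not Hadoop's actual /proc parsing code.

    import java.math.BigInteger;

    public class ProcStatParseDemo {
        public static void main(String[] args) {
            // Value copied from the stack trace above; it fits in an
            // unsigned 64-bit field but overflows a signed Java long.
            String field = "18446743988250694508";
            System.out.println("Long.MAX_VALUE = " + Long.MAX_VALUE);
            try {
                Long.parseLong(field);
            } catch (NumberFormatException e) {
                // Same exception ProcfsBasedProcessTree hits in the log above.
                System.out.println("NumberFormatException: " + e.getMessage());
            }
            // One tolerant alternative: parse as an arbitrary-precision integer.
            BigInteger unsigned = new BigInteger(field);
            System.out.println("Parsed as BigInteger: " + unsigned);
        }
    }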

On Tue, Jan 17, 2012 at 2:41 PM, Apache Jenkins Server <
jenkins@builds.apache.org> wrote:

> See <https://builds.apache.org/job/Pig-trunk/1174/changes>
>
> Changes:
>
> [dvryaboy] PIG-2359: Support more efficient Tuples when schemas are known
> (part 2)
>
> ------------------------------------------
> [...truncated 37037 lines...]
>    [junit]     at
> org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
>    [junit]     at
> org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
>    [junit]     at
> org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
>    [junit]     at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
>    [junit]     at
> junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
>    [junit] 12/01/17 22:33:18 WARN datanode.FSDatasetAsyncDiskService:
> AsyncDiskService has already shut down.
>    [junit] 12/01/17 22:33:18 INFO mortbay.log: Stopped
> SelectChannelConnector@localhost:0
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping server on 56123
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 0 on
> 56123: exiting
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping IPC Server listener
> on 56123
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 2 on
> 56123: exiting
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: Stopping IPC Server Responder
>    [junit] 12/01/17 22:33:18 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:18 INFO ipc.Server: IPC Server handler 1 on
> 56123: exiting
>    [junit] 12/01/17 22:33:18 INFO datanode.DataNode: Waiting for
> threadgroup to exit, active threads is 1
>    [junit] 12/01/17 22:33:18 WARN datanode.DataNode: DatanodeRegistration(
> 127.0.0.1:47324,
> storageID=DS-1849659132-67.195.138.20-47324-1326839095771, infoPort=37487,
> ipcPort=56123):DataXceiveServer:java.nio.channels.AsynchronousCloseException
>    [junit]     at
> java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
>    [junit]     at
> sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
>    [junit]     at
> sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
>    [junit]     at java.lang.Thread.run(Thread.java:662)
>    [junit]
>    [junit] 12/01/17 22:33:18 INFO datanode.DataNode: Exiting
> DataXceiveServer
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-5914952160057491263_1095 file
> build/test/data/dfs/data/data2/current/blk_-5914952160057491263 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-3299861067875593375_1102 file
> build/test/data/dfs/data/data2/current/blk_-3299861067875593375 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-2378485608018695930_1101 file
> build/test/data/dfs/data/data2/current/blk_-2378485608018695930 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-5914952160057491263_1095 at file
> build/test/data/dfs/data/data2/current/blk_-5914952160057491263
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_1266225547534690160_1102 file
> build/test/data/dfs/data/data1/current/blk_1266225547534690160 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-3299861067875593375_1102 at file
> build/test/data/dfs/data/data2/current/blk_-3299861067875593375
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_1266225547534690160_1102 at file
> build/test/data/dfs/data/data1/current/blk_1266225547534690160
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-2378485608018695930_1101 at file
> build/test/data/dfs/data/data2/current/blk_-2378485608018695930
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-5914952160057491263_1095 file
> build/test/data/dfs/data/data3/current/blk_-5914952160057491263 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-3299861067875593375_1102 file
> build/test/data/dfs/data/data4/current/blk_-3299861067875593375 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-5914952160057491263_1095 at file
> build/test/data/dfs/data/data3/current/blk_-5914952160057491263
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_-2378485608018695930_1101 file
> build/test/data/dfs/data/data4/current/blk_-2378485608018695930 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-3299861067875593375_1102 at file
> build/test/data/dfs/data/data4/current/blk_-3299861067875593375
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Scheduling block
> blk_1266225547534690160_1102 file
> build/test/data/dfs/data/data3/current/blk_1266225547534690160 for deletion
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_-2378485608018695930_1101 at file
> build/test/data/dfs/data/data4/current/blk_-2378485608018695930
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Deleted block
> blk_1266225547534690160_1102 at file
> build/test/data/dfs/data/data3/current/blk_1266225547534690160
>    [junit] 12/01/17 22:33:19 INFO datanode.DataBlockScanner: Exiting
> DataBlockScanner thread.
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for
> threadgroup to exit, active threads is 0
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: DatanodeRegistration(
> 127.0.0.1:47324,
> storageID=DS-1849659132-67.195.138.20-47324-1326839095771, infoPort=37487,
> ipcPort=56123):Finishing DataNode in: FSDataset{dirpath='<
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'
> }>
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 56123
>    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for
> threadgroup to exit, active threads is 0
>    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService:
> Shutting down all async disk service threads...
>    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: All
> async disk service threads have been shut down.
>    [junit] 12/01/17 22:33:19 WARN util.MBeans:
> Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1509900428
>    [junit] Shutting down DataNode 1
>    [junit] javax.management.InstanceNotFoundException:
> Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId1509900428
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
>    [junit]     at
> com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
>    [junit]     at
> org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
>    [junit]     at
> org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
>    [junit]     at
> org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
>    [junit]     at
> org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
>    [junit]     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native
> Method)
>    [junit]     at
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
>    [junit]     at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>    [junit]     at java.lang.reflect.Method.invoke(Method.java:597)
>    [junit]     at
> org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
>    [junit]     at
> org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
>    [junit]     at
> org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
>    [junit]     at
> org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
>    [junit]     at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
>    [junit]     at
> junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
>    [junit] 12/01/17 22:33:19 WARN datanode.FSDatasetAsyncDiskService:
> AsyncDiskService has already shut down.
>    [junit] 12/01/17 22:33:19 INFO mortbay.log: Stopped
> SelectChannelConnector@localhost:0
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 40885
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 0 on
> 40885: exiting
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping IPC Server listener
> on 40885
>    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 1 on
> 40885: exiting
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping IPC Server Responder
>    [junit] 12/01/17 22:33:19 WARN datanode.DataNode: DatanodeRegistration(
> 127.0.0.1:45088,
> storageID=DS-1863537504-67.195.138.20-45088-1326839095442, infoPort=49950,
> ipcPort=40885):DataXceiveServer:java.nio.channels.AsynchronousCloseException
>    [junit]     at
> java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
>    [junit]     at
> sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
>    [junit]     at
> sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
>    [junit]     at java.lang.Thread.run(Thread.java:662)
>    [junit]
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Exiting
> DataXceiveServer
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: IPC Server handler 2 on
> 40885: exiting
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for
> threadgroup to exit, active threads is 1
>    [junit] 12/01/17 22:33:19 INFO datanode.DataBlockScanner: Exiting
> DataBlockScanner thread.
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: DatanodeRegistration(
> 127.0.0.1:45088,
> storageID=DS-1863537504-67.195.138.20-45088-1326839095442, infoPort=49950,
> ipcPort=40885):Finishing DataNode in: FSDataset{dirpath='<
> https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'
> }>
>    [junit] 12/01/17 22:33:19 INFO ipc.Server: Stopping server on 40885
>    [junit] 12/01/17 22:33:19 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:19 INFO datanode.DataNode: Waiting for
> threadgroup to exit, active threads is 0
>    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService:
> Shutting down all async disk service threads...
>    [junit] 12/01/17 22:33:19 INFO datanode.FSDatasetAsyncDiskService: All
> async disk service threads have been shut down.
>    [junit] 12/01/17 22:33:19 WARN util.MBeans:
> Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1895231096
>    [junit] javax.management.InstanceNotFoundException:
> Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-1895231096
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
>    [junit]     at
> com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
>    [junit]     at
> com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
>    [junit]     at
> org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
>    [junit]     at
> org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
>    [junit]     at
> org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
>    [junit]     at
> org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
>    [junit]     at
> org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
>    [junit]     at
> org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
>    [junit]     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native
> Method)
>    [junit]     at
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
>    [junit]     at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>    [junit]     at java.lang.reflect.Method.invoke(Method.java:597)
>    [junit]     at
> org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
>    [junit]     at
> org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
>    [junit]     at
> org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
>    [junit]     at
> org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
>    [junit]     at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
>    [junit]     at
> junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
>    [junit]     at
> org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
>    [junit] 12/01/17 22:33:19 WARN datanode.FSDatasetAsyncDiskService:
> AsyncDiskService has already shut down.
>    [junit] Shutting down DataNode 0
>    [junit] 12/01/17 22:33:19 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping server on 41896
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 0 on 41896: exiting
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping IPC Server Responder
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 1 on 41896: exiting
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: IPC Server handler 2 on 41896: exiting
>    [junit] 12/01/17 22:33:20 INFO ipc.Server: Stopping IPC Server listener on 41896
>    [junit] 12/01/17 22:33:20 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:20 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
>    [junit] 12/01/17 22:33:20 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49647, storageID=DS-578389708-67.195.138.20-49647-1326839095094, infoPort=59242, ipcPort=41896):DataXceiveServer:java.nio.channels.AsynchronousCloseException
>    [junit]     at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
>    [junit]     at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
>    [junit]     at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:131)
>    [junit]     at java.lang.Thread.run(Thread.java:662)
>    [junit]
>    [junit] 12/01/17 22:33:20 INFO datanode.DataNode: Exiting DataXceiveServer
>    [junit] 12/01/17 22:33:20 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
>    [junit] 12/01/17 22:33:20 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:59660 to delete  blk_-5914952160057491263_1095
>    [junit] 12/01/17 22:33:20 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:47324 to delete  blk_-2378485608018695930_1101 blk_-3299861067875593375_1102 blk_1266225547534690160_1102
>    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
>    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49647, storageID=DS-578389708-67.195.138.20-49647-1326839095094, infoPort=59242, ipcPort=41896):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
>    [junit] 12/01/17 22:33:21 WARN util.MBeans: Hadoop:service=DataNode,name=DataNodeInfo
>    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=DataNodeInfo
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
>    [junit]     at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
>    [junit]     at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode.unRegisterMXBean(DataNode.java:513)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:726)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode.run(DataNode.java:1442)
>    [junit]     at java.lang.Thread.run(Thread.java:662)
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping server on 41896
>    [junit] 12/01/17 22:33:21 INFO metrics.RpcInstrumentation: shut down
>    [junit] 12/01/17 22:33:21 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
>    [junit] 12/01/17 22:33:21 INFO datanode.FSDatasetAsyncDiskService: Shutting down all async disk service threads...
>    [junit] 12/01/17 22:33:21 INFO datanode.FSDatasetAsyncDiskService: All async disk service threads have been shut down.
>    [junit] 12/01/17 22:33:21 WARN util.MBeans: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-982927745
>    [junit] javax.management.InstanceNotFoundException: Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-982927745
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getMBean(DefaultMBeanServerInterceptor.java:1094)
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.exclusiveUnregisterMBean(DefaultMBeanServerInterceptor.java:415)
>    [junit]     at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.unregisterMBean(DefaultMBeanServerInterceptor.java:403)
>    [junit]     at com.sun.jmx.mbeanserver.JmxMBeanServer.unregisterMBean(JmxMBeanServer.java:506)
>    [junit]     at org.apache.hadoop.metrics2.util.MBeans.unregister(MBeans.java:71)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.FSDataset.shutdown(FSDataset.java:1934)
>    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:788)
>    [junit]     at org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:566)
>    [junit]     at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:550)
>    [junit]     at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:87)
>    [junit]     at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
>    [junit]     at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
>    [junit]     at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:128)
>    [junit]     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>    [junit]     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
>    [junit]     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>    [junit]     at java.lang.reflect.Method.invoke(Method.java:597)
>    [junit]     at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
>    [junit]     at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
>    [junit]     at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
>    [junit]     at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
>    [junit]     at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
>    [junit]     at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
>    [junit]     at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
>    [junit]     at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
>    [junit]     at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
>    [junit] 12/01/17 22:33:21 WARN datanode.FSDatasetAsyncDiskService: AsyncDiskService has already shut down.
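
Both of these warning traces bottom out in TestStore.oneTimeTearDown via JUnit's RunAfters, i.e. they are emitted while the class-level teardown shuts down the mini DFS/MR clusters, not inside any individual test method. A rough sketch of that fixture pattern, assuming JUnit 4; the cluster class and method bodies below are placeholders for illustration and are not the actual TestStore or MiniGenericCluster code:

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterFixtureSketch {
        // Stand-in so the sketch is self-contained; TestStore presumably holds a real mini cluster here.
        static class FakeCluster {
            void shutDown() { /* stop DFS/MR services; tolerate "already stopped" */ }
        }

        private static FakeCluster cluster;

        @BeforeClass
        public static void oneTimeSetUp() {
            cluster = new FakeCluster();
        }

        @AfterClass
        public static void oneTimeTearDown() {
            // JUnit's RunAfters invokes this once, after the last test method;
            // warnings emitted here (like the MBean ones above) are shutdown noise,
            // not test assertions.
            if (cluster != null) {
                cluster.shutDown();
                cluster = null;
            }
        }

        @Test
        public void smokeTest() {
            // real assertions would go here
        }
    }

Because the teardown runs once per class, any noise it produces shows up after the last test method, which is where these warnings sit in the log.
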
>    [junit] 12/01/17 22:33:21 INFO mortbay.log: Stopped SelectChannelConnector@localhost:0
>    [junit] 12/01/17 22:33:21 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
>    [junit] 12/01/17 22:33:21 INFO namenode.FSNamesystem: Number of transactions: 502 Total time for transactions(ms): 10Number of transactions batched in Syncs: 165 Number of syncs: 349 SyncTimes(ms): 3426 280
>    [junit] 12/01/17 22:33:21 INFO namenode.DecommissionManager: Interrupted Monitor
>    [junit] java.lang.InterruptedException: sleep interrupted
>    [junit]     at java.lang.Thread.sleep(Native Method)
>    [junit]     at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
>    [junit]     at java.lang.Thread.run(Thread.java:662)
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping server on 36202
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 0 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 2 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 1 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 3 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 4 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 5 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping IPC Server listener on 36202
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 6 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 7 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 9 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: IPC Server handler 8 on 36202: exiting
>    [junit] 12/01/17 22:33:21 INFO ipc.Server: Stopping IPC Server Responder
>    [junit] 12/01/17 22:33:21 INFO metrics.RpcInstrumentation: shut down
>    [junit] Tests run: 17, Failures: 3, Errors: 3, Time elapsed: 499.771 sec
>    [junit] Test org.apache.pig.test.TestStore FAILED
>    [junit] Running org.apache.pig.test.TestStringUDFs
>    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
>    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
>    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
>    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
>    [junit] 12/01/17 22:33:22 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
>    [junit] 12/01/17 22:33:22 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
>    [junit] 12/01/17 22:33:22 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
>    [junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.102 sec
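
The SUBSTRING warnings above come from a run that still reports 11 tests passing, so the NullPointerException and StringIndexOutOfBoundsException messages appear to be the expected outcomes of negative-test inputs; the "No logger object provided to UDF" prefix appears to be Pig's fallback warning path when a UDF is exercised without an attached logger. For comparison, a minimal sketch of defensive bounds clamping for a substring-style helper, purely illustrative and not the behaviour of the actual org.apache.pig.builtin.SUBSTRING:

    public class SafeSubstring {
        // Clamp begin/end into [0, length] so out-of-range arguments yield a truncated
        // result instead of a StringIndexOutOfBoundsException.
        static String safeSubstring(String s, int begin, int end) {
            if (s == null) {
                return null;                              // sidestep the NullPointerException case
            }
            int b = Math.min(Math.max(begin, 0), s.length());
            int e = Math.min(Math.max(end, b), s.length());
            return s.substring(b, e);
        }

        public static void main(String[] args) {
            System.out.println(safeSubstring("hello", 2, 100));  // llo
            System.out.println(safeSubstring("hello", -3, 2));   // he
            System.out.println(safeSubstring(null, 0, 1));       // null
        }
    }

With clamping, out-of-range begin/end arguments degrade to a truncated (possibly empty) result instead of surfacing a raw index error, which is one way a UDF could avoid warnings like those above.
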
>   [delete] Deleting directory /tmp/pig_junit_tmp1053430922
>
> BUILD FAILED
> <https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:774: The following error occurred while executing this line:
> <https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:831: Tests failed!
>
> Total time: 22 minutes 12 seconds
> Build step 'Execute shell' marked build as failure
> [FINDBUGS] Skipping publisher since build result is FAILURE
> Recording test results
> Publishing Javadoc
> Archiving artifacts
> Recording fingerprints
> Publishing Clover coverage report...
> No Clover report will be published due to a Build Failure
>
>