Posted to issues@tajo.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2015/09/09 11:36:49 UTC

Build failed in Jenkins: Tajo-0.11.0-nightly #23

See <https://builds.apache.org/job/Tajo-0.11.0-nightly/23/changes>

Changes:

[jihoonson] TAJO-1782: Check ON_ERROR_STOP flag in TSQL when an error occurs.
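
For context: TAJO-1782 makes tsql honor the ON_ERROR_STOP session variable, aborting a script on the first failed statement instead of continuing. A minimal sketch of that control flow, with hypothetical names (illustrative only, not the actual org.apache.tajo.cli code):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    /** Illustrative sketch of ON_ERROR_STOP handling in a SQL shell loop. */
    public class OnErrorStopSketch {
      /** Returns 0 if the whole script ran, 1 if it stopped on an error. */
      public static int runScript(Connection conn, String[] statements,
                                  boolean onErrorStop) {
        for (String sql : statements) {
          try (Statement stmt = conn.createStatement()) {
            stmt.execute(sql);
          } catch (SQLException e) {
            System.err.println("ERROR: " + e.getMessage());
            if (onErrorStop) {
              return 1;  // ON_ERROR_STOP set: abort the remaining script
            }
            // ON_ERROR_STOP unset: report the error and keep going
          }
        }
        return 0;
      }
    }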

------------------------------------------
[...truncated 744457 lines...]
	at com.sun.proxy.$Proxy83.fsync(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.flushOrSync(DFSOutputStream.java:2022)
	at org.apache.hadoop.hdfs.DFSOutputStream.hsync(DFSOutputStream.java:1898)
	at org.apache.hadoop.fs.FSDataOutputStream.hsync(FSDataOutputStream.java:139)
	at org.apache.tajo.ha.HdfsServiceTracker.createMasterFile(HdfsServiceTracker.java:244)
	at org.apache.tajo.ha.HdfsServiceTracker.register(HdfsServiceTracker.java:155)
	at org.apache.tajo.ha.HdfsServiceTracker$PingChecker.run(HdfsServiceTracker.java:374)
	at java.lang.Thread.run(Thread.java:724)
2015-09-09 09:36:28,275 WARN: org.apache.hadoop.hdfs.DFSClient (flushOrSync(2047)) - Error while syncing
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /tajo/system/ha/active/127.0.0.1_40307 (inode 29836): File does not exist. Holder DFSClient_NONMAPREDUCE_-682363687_1 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3433)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.fsync(FSNamesystem.java:3998)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.fsync(NameNodeRpcServer.java:1210)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.fsync(ClientNamenodeProtocolServerSideTranslatorPB.java:903)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2049)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2045)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2045)

	at org.apache.hadoop.ipc.Client.call(Client.java:1476)
	at org.apache.hadoop.ipc.Client.call(Client.java:1407)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
	at com.sun.proxy.$Proxy42.fsync(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.fsync(ClientNamenodeProtocolTranslatorPB.java:838)
	at sun.reflect.GeneratedMethodAccessor408.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
	at com.sun.proxy.$Proxy43.fsync(Unknown Source)
	at sun.reflect.GeneratedMethodAccessor408.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
	at com.sun.proxy.$Proxy83.fsync(Unknown Source)
	at sun.reflect.GeneratedMethodAccessor408.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
	at com.sun.proxy.$Proxy83.fsync(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.flushOrSync(DFSOutputStream.java:2022)
	at org.apache.hadoop.hdfs.DFSOutputStream.hsync(DFSOutputStream.java:1898)
	at org.apache.hadoop.fs.FSDataOutputStream.hsync(FSDataOutputStream.java:139)
	at org.apache.tajo.ha.HdfsServiceTracker.createMasterFile(HdfsServiceTracker.java:244)
	at org.apache.tajo.ha.HdfsServiceTracker.register(HdfsServiceTracker.java:155)
	at org.apache.tajo.ha.HdfsServiceTracker$PingChecker.run(HdfsServiceTracker.java:374)
	at java.lang.Thread.run(Thread.java:724)
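
The LeaseExpiredException above means the NameNode no longer recognizes this client's lease on /tajo/system/ha/active/127.0.0.1_40307: the file was deleted or recreated (expected during an HA failover test, or at cluster teardown) while the tracker's PingChecker still held an open output stream and kept calling hsync(). A minimal sketch of the heartbeat pattern the trace points at (assumes a reachable HDFS; the path and payload are taken from this log for illustration, not from the Tajo source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Sketch of an hsync-based liveness marker like HdfsServiceTracker's master file. */
    public class ActiveFileHeartbeat {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path active = new Path("/tajo/system/ha/active/127.0.0.1_40307");

        // Keep the stream open and hsync periodically as proof of liveness.
        try (FSDataOutputStream out = fs.create(active, true /* overwrite */)) {
          out.writeUTF("127.0.0.1:40307");
          // hsync() forces the NameNode-side fsync seen in the trace; if another
          // process has deleted or recreated the file in the meantime, the call
          // fails with exactly this LeaseExpiredException.
          out.hsync();
        }
      }
    }

If the failover deliberately removes the active file, these warnings are largely noise from the old master's heartbeat thread racing the shutdown.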
2015-09-09 09:36:28,275 WARN: org.apache.hadoop.hdfs.DFSClient (closeResponder(612)) - Caught exception 
java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at java.lang.Thread.join(Thread.java:1260)
	at java.lang.Thread.join(Thread.java:1334)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.closeResponder(DFSOutputStream.java:610)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.closeInternal(DFSOutputStream.java:578)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:574)
2015-09-09 09:36:28,279 ERROR: org.apache.hadoop.hdfs.server.datanode.DataNode (run(278)) - 127.0.0.1:53472:DataXceiver error processing WRITE_BLOCK operation  src: /127.0.0.1:54587 dst: /127.0.0.1:53472
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:201)
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:213)
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:109)
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:472)
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:849)
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:804)
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:137)
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:74)
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:251)
	at java.lang.Thread.run(Thread.java:724)
2015-09-09 09:36:29,150 INFO: BlockStateChange (invalidateWorkForOneNode(3488)) - BLOCK* BlockManager: ask 127.0.0.1:53472 to delete [blk_1073748725_7901, blk_1073748709_7885, blk_1073748718_7894]
2015-09-09 09:36:29,263 INFO: BlockStateChange (processAndHandleReportedBlock(3171)) - BLOCK* addBlock: block blk_1073748726_7902 on node 127.0.0.1:53472 size 134217728 does not belong to any file
2015-09-09 09:36:29,263 INFO: BlockStateChange (add(115)) - BLOCK* InvalidateBlocks: add blk_1073748726_7902 to 127.0.0.1:53472
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.382 sec - in org.apache.tajo.parser.sql.TestSQLAnalyzer
Sep 9, 2015 9:21:20 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:20 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 2
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 26
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for [l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 26
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for [l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 26
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for [l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 26
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for [l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 26
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:34 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for [l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:35 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 212
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [l_orderkey] INT32: 5 values, 10B raw, 10B comp, 1 pages, encodings: [RLE, PLAIN_DICTIONARY, BIT_PACKED], dic { 3 entries, 12B raw, 3B comp}
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 123B for [l_shipdate] BINARY: 5 values, 76B raw, 76B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 123B for [l_shipdate_function] BINARY: 5 values, 76B raw, 76B comp, 1 pages, encodings: [PLAIN, RLE, BIT_PACKED]
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 5 records.
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Sep 9, 2015 9:21:38 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 5
2015-09-09 09:36:29,726 INFO: org.mortbay.log (info(67)) - Shutdown hook executing
2015-09-09 09:36:29,727 INFO: org.mortbay.log (info(67)) - Shutdown hook complete
2015-09-09 09:36:29,730 INFO: org.apache.tajo.session.SessionManager (removeSession(86)) - Session f2700276-e7a1-4b85-ba61-6406a932f4ae is removed.
2015-09-09 09:36:29,729 INFO: org.apache.tajo.worker.TajoWorker (run(570)) - ============================================
2015-09-09 09:36:29,730 INFO: org.apache.tajo.master.TajoMaster (run(540)) - ============================================
2015-09-09 09:36:29,734 INFO: org.apache.tajo.worker.TajoWorker (run(571)) - TajoWorker received SIGINT Signal
2015-09-09 09:36:29,734 INFO: org.apache.tajo.worker.TajoWorker (run(572)) - ============================================
2015-09-09 09:36:29,734 INFO: org.apache.tajo.master.TajoMaster (run(541)) - TajoMaster received SIGINT Signal
2015-09-09 09:36:29,736 INFO: org.apache.tajo.master.TajoMaster (run(542)) - ============================================
2015-09-09 09:36:29,736 INFO: org.apache.tajo.util.history.HistoryCleaner (run(136)) - History cleaner stopped
2015-09-09 09:36:29,736 INFO: org.apache.tajo.util.history.HistoryWriter (run(275)) - HistoryWriter_asf909.gq1.ygridcore.net_22880 stopped.
2015-09-09 09:36:29,737 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (Tajo-REST) listened on 0:0:0:0:0:0:0:0:22879) shutdown
2015-09-09 09:36:29,739 INFO: org.apache.tajo.ws.rs.TajoRestService (serviceStop(129)) - Tajo Rest Service stopped.
2015-09-09 09:36:29,742 INFO: org.apache.tajo.util.history.HistoryWriter (run(275)) - HistoryWriter_127.0.0.1_22878 stopped.
2015-09-09 09:36:29,745 INFO: BlockStateChange (logAddStoredBlock(2624)) - BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:53472 is added to blk_1073741857_1033{UCState=UNDER_CONSTRUCTION, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-6965e7bb-3ad5-44fd-88c9-11f305ab2c13:NORMAL:127.0.0.1:53472|RBW]]} size 731
2015-09-09 09:36:29,746 INFO: org.apache.tajo.util.history.HistoryCleaner (run(136)) - History cleaner stopped
2015-09-09 09:36:29,746 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (QueryCoordinatorProtocol) listened on 127.0.0.1:22878) shutdown
2015-09-09 09:36:29,748 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (TajoMasterClientProtocol) listened on 127.0.0.1:22877) shutdown
2015-09-09 09:36:29,748 INFO: org.apache.tajo.catalog.CatalogServer (serviceStop(182)) - Catalog Server (127.0.0.1:22876) shutdown
2015-09-09 09:36:29,749 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (CatalogProtocol) listened on 127.0.0.1:22876) shutdown
2015-09-09 09:36:29,749 INFO: org.apache.tajo.catalog.store.DerbyStore (close(2783)) - Close database (jdbc:derby:memory:<https://builds.apache.org/job/Tajo-0.11.0-nightly/ws/tajo-core-tests/target/test-data/0bf4bc01-6fe1-427b-b249-1b48ceab7e75/db;create=true>)
2015-09-09 09:36:29,749 WARN: org.apache.tajo.rpc.NettyClientBase (doReconnect(200)) - Exception [org.apache.tajo.ipc.TajoMasterClientProtocol(/127.0.0.1:22877)]: ConnectException: Connection refused: /127.0.0.1:22877
Try to reconnect : /127.0.0.1:22877
2015-09-09 09:36:29,750 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (TajoResourceTrackerProtocol) listened on 127.0.0.1:22875) shutdown
2015-09-09 09:36:29,751 INFO: org.apache.tajo.master.TajoMaster (serviceStop(408)) - Tajo Master main thread exiting
2015-09-09 09:36:29,843 INFO: org.apache.tajo.worker.NodeStatusUpdater (serviceStop(111)) - NodeStatusUpdater stopped.
2015-09-09 09:36:29,843 INFO: org.apache.tajo.worker.NodeStatusUpdater (run(262)) - Heartbeat Thread stopped.
2015-09-09 09:36:29,845 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (QueryMasterProtocol) listened on 0:0:0:0:0:0:0:0:22882) shutdown
2015-09-09 09:36:29,845 INFO: org.apache.tajo.querymaster.QueryMasterManagerService (serviceStop(106)) - QueryMasterManagerService stopped
2015-09-09 09:36:29,845 INFO: org.apache.tajo.querymaster.QueryMaster (run(425)) - QueryMaster heartbeat thread stopped
2015-09-09 09:36:29,845 INFO: org.apache.tajo.querymaster.QueryMaster (serviceStop(161)) - QueryMaster stopped
2015-09-09 09:36:29,846 INFO: org.apache.tajo.worker.TajoWorkerClientService (stop(98)) - TajoWorkerClientService stopping
2015-09-09 09:36:29,847 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (QueryMasterClientProtocol) listened on 0:0:0:0:0:0:0:0:22881) shutdown
2015-09-09 09:36:29,848 INFO: org.apache.tajo.worker.TajoWorkerClientService (stop(102)) - TajoWorkerClientService stopped
2015-09-09 09:36:29,849 INFO: org.apache.tajo.rpc.NettyServerBase (shutdown(173)) - Rpc (TajoWorkerProtocol) listened on 0:0:0:0:0:0:0:0:22880) shutdown
2015-09-09 09:36:29,849 INFO: org.apache.tajo.worker.TajoWorkerManagerService (serviceStop(93)) - TajoWorkerManagerService stopped
2015-09-09 09:36:29,851 INFO: org.apache.tajo.worker.TajoWorker (serviceStop(380)) - TajoWorker main thread exiting
2015-09-09 09:36:29,992 INFO: org.apache.tajo.catalog.store.DerbyStore (shutdown(68)) - Derby shutdown complete normally.
2015-09-09 09:36:29,992 INFO: org.apache.tajo.catalog.store.DerbyStore (shutdown(75)) - Shutdown database
2015-09-09 09:36:30,751 WARN: org.apache.tajo.rpc.NettyClientBase (doReconnect(200)) - Exception [org.apache.tajo.ipc.TajoMasterClientProtocol(/127.0.0.1:22877)]: ConnectException: Connection refused: /127.0.0.1:22877
Try to reconnect : /127.0.0.1:22877
2015-09-09 09:36:31,752 ERROR: org.apache.tajo.rpc.NettyClientBase (doReconnect(211)) - Max retry count has been exceeded. attempts=3 caused by: java.net.ConnectException: Connection refused: /127.0.0.1:22877

Results :

Tests in error: 
  TestHAServiceHDFSImpl.testAutoFailOver:82->verifyDataBaseAndTable:152 » TajoRuntime

Tests run: 1632, Failures: 0, Errors: 1, Skipped: 0
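
The lone error is the HA auto-failover test, which lines up with the LeaseExpiredException on the active-master file earlier in the log; the other 1631 tests passed. To reproduce locally, the single test can usually be run in isolation with Surefire's -Dtest parameter (flags shown are standard Maven/Surefire options, not taken from this build), e.g.:

  mvn test -Dtest=TestHAServiceHDFSImpl -rf :tajo-core-tests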

[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Tajo Main ......................................... SUCCESS [  1.622 s]
[INFO] Tajo Project POM .................................. SUCCESS [  1.223 s]
[INFO] Tajo Maven Plugins ................................ SUCCESS [  2.554 s]
[INFO] Tajo Common ....................................... SUCCESS [ 37.036 s]
[INFO] Tajo Algebra ...................................... SUCCESS [  2.436 s]
[INFO] Tajo Catalog Common ............................... SUCCESS [  4.773 s]
[INFO] Tajo Plan ......................................... SUCCESS [  6.414 s]
[INFO] Tajo Rpc Common ................................... SUCCESS [  1.291 s]
[INFO] Tajo Protocol Buffer Rpc .......................... SUCCESS [ 48.407 s]
[INFO] Tajo Catalog Client ............................... SUCCESS [  1.322 s]
[INFO] Tajo Catalog Server ............................... SUCCESS [ 59.613 s]
[INFO] Tajo Storage Common ............................... SUCCESS [  2.768 s]
[INFO] Tajo HDFS Storage ................................. SUCCESS [ 59.952 s]
[INFO] Tajo PullServer ................................... SUCCESS [  1.304 s]
[INFO] Tajo Client ....................................... SUCCESS [  2.330 s]
[INFO] Tajo CLI tools .................................... SUCCESS [  1.753 s]
[INFO] Tajo SQL Parser ................................... SUCCESS [  4.823 s]
[INFO] ASM (thirdparty) .................................. SUCCESS [  1.558 s]
[INFO] Tajo RESTful Container ............................ SUCCESS [  3.902 s]
[INFO] Tajo Metrics ...................................... SUCCESS [  1.326 s]
[INFO] Tajo Core ......................................... SUCCESS [  5.621 s]
[INFO] Tajo RPC .......................................... SUCCESS [  0.918 s]
[INFO] Tajo Catalog Drivers Hive ......................... SUCCESS [ 29.232 s]
[INFO] Tajo Catalog Drivers .............................. SUCCESS [  0.064 s]
[INFO] Tajo Catalog ...................................... SUCCESS [  0.976 s]
[INFO] Tajo Client Example ............................... SUCCESS [  1.020 s]
[INFO] Tajo HBase Storage ................................ SUCCESS [  3.856 s]
[INFO] Tajo Cluster Tests ................................ SUCCESS [  2.395 s]
[INFO] Tajo JDBC Driver .................................. SUCCESS [ 34.977 s]
[INFO] Tajo Storage ...................................... SUCCESS [  1.014 s]
[INFO] Tajo Distribution ................................. SUCCESS [  5.517 s]
[INFO] Tajo Core Tests ................................... FAILURE [22:10 min]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 27:43 min
[INFO] Finished at: 2015-09-09T09:36:32+00:00
[INFO] Final Memory: 77M/469M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project tajo-core-tests: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Tajo-0.11.0-nightly/ws/tajo-core-tests/target/surefire-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :tajo-core-tests
Build step 'Execute shell' marked build as failure
Archiving artifacts
Sending artifact delta relative to Tajo-0.11.0-nightly #22
Archived 1 artifacts
Archive block size is 32768
Received 0 blocks and 59328492 bytes
Compression is 0.0%
Took 14 sec
Recording test results