Posted to issues@tajo.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2015/08/19 05:29:27 UTC

Build failed in Jenkins: Tajo-master-nightly #803

See <https://builds.apache.org/job/Tajo-master-nightly/803/changes>

Changes:

[jhkim] TAJO-1777: JsonLineDeserializer returns invalid Unicode text if the input contains a control character

------------------------------------------
[...truncated 1896 lines...]
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:225)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:209)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:305)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:294)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:326)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:393)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:456)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:909)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:890)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:787)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:776)
	at org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileAppender.init(DelimitedTextFile.java:161)
	at org.apache.tajo.storage.TestFileTablespace.testGetScannerAndAppender(TestFileTablespace.java:88)

testGetFileTablespace(org.apache.tajo.storage.TestFileTablespace)  Time elapsed: 0.17 sec  <<< ERROR!
java.lang.OutOfMemoryError: unable to create new native thread
	at java.lang.Thread.start0(Native Method)
	at java.lang.Thread.start(Thread.java:693)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1169)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1133)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:163)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:991)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:342)
	at org.apache.hadoop.hdfs.DFSTestUtil.formatNameNode(DFSTestUtil.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:973)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:811)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:471)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:430)
	at org.apache.tajo.storage.TestFileTablespace.testGetFileTablespace(TestFileTablespace.java:219)

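For context: despite the class name, "java.lang.OutOfMemoryError: unable to create new native thread" does not mean the heap is exhausted; it is thrown when the operating system refuses to create another thread, typically because the build agent has hit its per-user thread/process limit. A minimal sketch that reproduces the symptom (the class name is illustrative, not part of this build):

    // ThreadExhaustion.java - keeps starting parked threads until the OS
    // refuses to create another one; at that point the JVM throws
    // java.lang.OutOfMemoryError: unable to create new native thread.
    import java.util.concurrent.CountDownLatch;

    public class ThreadExhaustion {
        public static void main(String[] args) {
            CountDownLatch never = new CountDownLatch(1);
            int started = 0;
            try {
                while (true) {
                    new Thread(() -> {
                        try { never.await(); } catch (InterruptedException ignored) { }
                    }).start();
                    started++;
                }
            } catch (OutOfMemoryError e) {
                System.err.println("thread creation failed after " + started + ": " + e);
            }
        }
    }
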
Running org.apache.tajo.storage.TestDelimitedTextFile
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.119 sec - in org.apache.tajo.storage.TestDelimitedTextFile
Running org.apache.tajo.storage.TestByteBufLineReader
Formatting using clusterid: testClusterID
Tests run: 3, Failures: 0, Errors: 3, Skipped: 0, Time elapsed: 0.245 sec <<< FAILURE! - in org.apache.tajo.storage.TestByteBufLineReader
testReaderWithLocalFS(org.apache.tajo.storage.TestByteBufLineReader)  Time elapsed: 0.066 sec  <<< ERROR!
java.io.IOException: Cannot run program "chmod": error=11, Resource temporarily unavailable
	at java.lang.UNIXProcess.forkAndExec(Native Method)
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:135)
	at java.lang.ProcessImpl.start(ProcessImpl.java:130)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:1022)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:486)
	at org.apache.hadoop.util.Shell.run(Shell.java:456)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:815)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:798)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:728)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:225)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:209)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:305)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:294)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:326)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:393)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:456)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:909)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:890)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:787)
	at org.apache.tajo.storage.TestByteBufLineReader.testReaderWithLocalFS(TestByteBufLineReader.java:68)

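The companion failure mode above, "Cannot run program \"chmod\": error=11, Resource temporarily unavailable", has the same root cause: error=11 is EAGAIN from fork(2). As the stack trace shows, RawLocalFileSystem.setPermission sets file permissions by shelling out to chmod through org.apache.hadoop.util.Shell, so when the OS cannot fork a child process the call surfaces as an IOException. A minimal sketch of that call path (the class name and target path are hypothetical):

    // ChmodFork.java - forks an external chmod the way
    // org.apache.hadoop.util.Shell ultimately does; on a process-starved
    // host ProcessBuilder.start() fails with "error=11".
    import java.io.IOException;

    public class ChmodFork {
        public static void main(String[] args) {
            try {
                new ProcessBuilder("chmod", "644", "/tmp/example").start().waitFor();
            } catch (IOException e) {
                // e.g. Cannot run program "chmod": error=11, Resource temporarily unavailable
                System.err.println("fork/exec failed: " + e.getMessage());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }
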
testReaderWithDFS(org.apache.tajo.storage.TestByteBufLineReader)  Time elapsed: 0.111 sec  <<< ERROR!
java.lang.OutOfMemoryError: unable to create new native thread
	at java.lang.Thread.start0(Native Method)
	at java.lang.Thread.start(Thread.java:693)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1169)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1133)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:163)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:991)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:342)
	at org.apache.hadoop.hdfs.DFSTestUtil.formatNameNode(DFSTestUtil.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:973)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:811)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:471)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:430)
	at org.apache.tajo.storage.TestByteBufLineReader.testReaderWithDFS(TestByteBufLineReader.java:96)

testReaderWithNIO(org.apache.tajo.storage.TestByteBufLineReader)  Time elapsed: 0.066 sec  <<< ERROR!
java.io.IOException: Cannot run program "chmod": error=11, Resource temporarily unavailable
	at java.lang.UNIXProcess.forkAndExec(Native Method)
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:135)
	at java.lang.ProcessImpl.start(ProcessImpl.java:130)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:1022)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:486)
	at org.apache.hadoop.util.Shell.run(Shell.java:456)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:815)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:798)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:728)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:225)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:209)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:305)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:294)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:326)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:393)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:456)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:909)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:890)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:787)
	at org.apache.tajo.storage.TestByteBufLineReader.testReaderWithNIO(TestByteBufLineReader.java:137)

Running org.apache.tajo.storage.TestLineReader
Tests run: 6, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 0.436 sec <<< FAILURE! - in org.apache.tajo.storage.TestLineReader
testByteBufLineReader(org.apache.tajo.storage.TestLineReader)  Time elapsed: 0.068 sec  <<< ERROR!
java.lang.OutOfMemoryError: unable to create new native thread
	at java.lang.Thread.start0(Native Method)
	at java.lang.Thread.start(Thread.java:693)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:522)
	at org.apache.hadoop.util.Shell.run(Shell.java:456)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:815)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:798)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:728)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:225)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:209)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:305)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:294)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:326)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:393)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:456)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:909)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:890)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:787)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:776)
	at org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileAppender.init(DelimitedTextFile.java:161)
	at org.apache.tajo.storage.TestLineReader.testByteBufLineReader(TestLineReader.java:71)

INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 280,000
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 40,047B for [id] INT32: 10,000 values, 40,008B raw, 40,008B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 80,055B for [age] INT64: 10,000 values, 80,008B raw, 80,008B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 40,047B for [score] FLOAT: 10,000 values, 40,008B raw, 40,008B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 10000 records.
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 10000
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 66,794
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 36B for [col1] BOOLEAN: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 48B for [col2] BINARY: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 11B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 42B for [col3] INT32: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 4B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 42B for [col4] INT32: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 4B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 50B for [col5] INT64: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 8B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 42B for [col6] FLOAT: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 4B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 50B for [col7] DOUBLE: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 8B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 48B for [col8] BINARY: 12 values, 9B raw, 9B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 11B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 49B for [col9] BINARY: 12 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 11B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [col10] BINARY: 12 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 8B raw, 1B comp}
Aug 19, 2015 3:30:03 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 53B for [col12] BINARY: 12 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 1 entries, 13B raw, 1B comp}
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 12 records.
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Aug 19, 2015 3:30:04 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 12
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore to file. allocated memory: 65,659
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 34B for [myboolean] BOOLEAN: 1 values, 7B raw, 7B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [mybit] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 38B for [mychar] BINARY: 1 values, 11B raw, 11B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [myint2] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [myint4] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 55B for [myint8] INT64: 1 values, 14B raw, 14B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for [myfloat4] FLOAT: 1 values, 10B raw, 10B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 55B for [myfloat8] DOUBLE: 1 values, 14B raw, 14B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 50B for [mytext] BINARY: 1 values, 15B raw, 15B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 50B for [myblob] BINARY: 1 values, 15B raw, 15B comp, 1 pages, encodings: [PLAIN, BIT_PACKED, RLE]
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: reading another 1 footers
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.ParquetFileReader: Initiating action with parallelism: 5
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 1 records.
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next block
Aug 19, 2015 3:30:13 AM INFO: org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in 1 ms. row count = 1

Results :

Tests in error: 
  TestStorages.testMaxValue:923 » IO Cannot run program "chmod": error=11, Resou...
  TestStorages.testNullHandlingTypes:429 » OutOfMemory unable to create new nati...
  TestFileSystems.testBlockSplit:120 » IO Cannot run program "chmod": error=11, ...
  TestAvroUtil.testGetSchema:80 » OutOfMemory unable to create new native thread
  TestFileTablespace.testGetSplitWithBlockStorageLocationsBatching:207 » OutOfMemory
  TestFileTablespace.testGetScannerAndAppender:88 » IO Cannot run program "chmod...
  TestFileTablespace.testGetFileTablespace:219 » OutOfMemory unable to create ne...
  TestByteBufLineReader.testReaderWithLocalFS:68 » IO Cannot run program "chmod"...
  TestByteBufLineReader.testReaderWithDFS:96 » OutOfMemory unable to create new ...
  TestByteBufLineReader.testReaderWithNIO:137 » IO Cannot run program "chmod": e...
  TestLineReader.testByteBufLineReader:71 » OutOfMemory unable to create new nat...

Tests run: 178, Failures: 0, Errors: 11, Skipped: 0

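All eleven errors above show one of the same two signatures (failed native thread creation or failed fork of chmod), which suggests resource exhaustion on the build agent rather than a regression from the TAJO-1777 change. A hedged diagnostic sketch one could run on the agent to compare the JVM's live thread count against the per-user process ceiling (the class name is illustrative, and the ulimit call assumes a Linux agent):

    // AgentThreadCheck.java - prints the JVM's live thread count and the
    // per-user process limit (ulimit -u), the ceiling both failure modes hit.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.lang.management.ManagementFactory;

    public class AgentThreadCheck {
        public static void main(String[] args) throws Exception {
            System.out.println("live JVM threads: "
                    + ManagementFactory.getThreadMXBean().getThreadCount());
            Process p = new ProcessBuilder("bash", "-c", "ulimit -u").start();
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                System.out.println("per-user process limit: " + r.readLine());
            }
        }
    }
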
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Tajo Main ......................................... SUCCESS [  2.133 s]
[INFO] Tajo Project POM .................................. SUCCESS [  2.797 s]
[INFO] Tajo Maven Plugins ................................ SUCCESS [  3.660 s]
[INFO] Tajo Common ....................................... SUCCESS [ 34.828 s]
[INFO] Tajo Algebra ...................................... SUCCESS [  3.505 s]
[INFO] Tajo Catalog Common ............................... SUCCESS [  5.638 s]
[INFO] Tajo Plan ......................................... SUCCESS [  8.209 s]
[INFO] Tajo Rpc Common ................................... SUCCESS [  1.591 s]
[INFO] Tajo Protocol Buffer Rpc .......................... SUCCESS [01:30 min]
[INFO] Tajo Catalog Client ............................... SUCCESS [  1.531 s]
[INFO] Tajo Catalog Server ............................... SUCCESS [ 12.691 s]
[INFO] Tajo Storage Common ............................... SUCCESS [ 12.845 s]
[INFO] Tajo HDFS Storage ................................. FAILURE [ 59.949 s]
[INFO] Tajo PullServer ................................... SKIPPED
[INFO] Tajo Client ....................................... SKIPPED
[INFO] Tajo CLI tools .................................... SKIPPED
[INFO] Tajo JDBC Driver .................................. SKIPPED
[INFO] ASM (thirdparty) .................................. SKIPPED
[INFO] Tajo RESTful Container ............................ SKIPPED
[INFO] Tajo Metrics ...................................... SKIPPED
[INFO] Tajo Core ......................................... SKIPPED
[INFO] Tajo RPC .......................................... SKIPPED
[INFO] Tajo Catalog Drivers Hive ......................... SKIPPED
[INFO] Tajo Catalog Drivers .............................. SKIPPED
[INFO] Tajo Catalog ...................................... SKIPPED
[INFO] Tajo HBase Storage ................................ SKIPPED
[INFO] Tajo Storage ...................................... SKIPPED
[INFO] Tajo Distribution ................................. SKIPPED
[INFO] Tajo Cluster Tests ................................ SKIPPED
[INFO] Tajo Core Tests ................................... SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 04:00 min
[INFO] Finished at: 2015-08-19T03:30:18+00:00
[INFO] Final Memory: 75M/406M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project tajo-storage-hdfs: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Tajo-master-nightly/ws/tajo-storage/tajo-storage-hdfs/target/surefire-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :tajo-storage-hdfs
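(For example, once the agent's process limit has been raised, "mvn test -rf :tajo-storage-hdfs" would re-run the failed module and everything after it; the exact goals depend on how the nightly job invokes Maven.)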
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results