Posted to commits@carbondata.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2019/01/28 16:00:08 UTC

Build failed in Jenkins: carbondata-master-spark-2.2 » Apache CarbonData :: Examples #1508

See <https://builds.apache.org/job/carbondata-master-spark-2.2/org.apache.carbondata$carbondata-examples/1508/display/redirect>

------------------------------------------
[...truncated 167.26 KB...]
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
	at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
	at org.apache.carbondata.examplesCI.RunExamples.org$scalatest$BeforeAndAfterAll$$super$run(RunExamples.scala:35)
	at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
	at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
	at org.apache.carbondata.examplesCI.RunExamples.run(RunExamples.scala:35)
	at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
	at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
	at org.scalatest.Suite$class.run(Suite.scala:1421)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
	at org.scalatest.tools.Runner$.main(Runner.scala:860)
	at org.scalatest.tools.Runner.main(Runner.scala)
2019-01-28 15:59:09 ERROR CarbonDataRDDFactory$:420 - java.io.IOException: Failed to create local dir in /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36/22.
2019-01-28 15:59:09 ERROR CarbonLoadDataCommand:391 - java.lang.Exception: DataLoad failure
2019-01-28 15:59:09 ERROR CarbonLoadDataCommand:166 - Got exception java.lang.Exception: DataLoad failure when processing data. But this command does not support undo yet, skipping the undo part.
2019-01-28 15:59:09 AUDIT audit:93 - {"time":"January 28, 2019 7:59:09 AM PST","username":"jenkins","opName":"LOAD DATA","opId":"8448117952901176","opStatus":"FAILED","opTime":"412 ms","table":"default.timeSeriesTable","extraInfo":{"Exception":"java.lang.Exception","Message":"DataLoad failure"}}
- TimeSeriesPreAggregateTableExample *** FAILED ***
  java.lang.Exception: DataLoad failure
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:483)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:630)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:357)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
  at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
  at org.apache.spark.sql.CarbonDataFrameWriter.loadDataFrame(CarbonDataFrameWriter.scala:62)
  at org.apache.spark.sql.CarbonDataFrameWriter.writeToCarbonFile(CarbonDataFrameWriter.scala:46)
  ...
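
[Editor's note] The DataLoad failures in this log all trace back to the same root cause shown a few lines above: Spark's DiskBlockManager could not create a sub-directory under /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36, which usually indicates that /tmp (or wherever spark.local.dir points) on the build agent was full or not writable, rather than a CarbonData code problem. A minimal, hypothetical Scala check that could be run on the agent to confirm this is sketched below; the fallback path is illustrative, since Spark uses java.io.tmpdir when spark.local.dir is not configured.

    import java.io.File

    object LocalDirCheck {
      def main(args: Array[String]): Unit = {
        // Spark falls back to java.io.tmpdir when spark.local.dir is not set.
        val localDir = new File(sys.props.getOrElse("java.io.tmpdir", "/tmp"))
        val freeGb = localDir.getUsableSpace.toDouble / (1024L * 1024 * 1024)
        println(f"local dir: $localDir  writable=${localDir.canWrite}  free=$freeGb%.2f GB")
      }
    }
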
2019-01-28 15:59:09 AUDIT audit:72 - {"time":"January 28, 2019 7:59:09 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448118463044015","opStatus":"START"}
2019-01-28 15:59:10 AUDIT audit:93 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448118463044015","opStatus":"SUCCESS","opTime":"215 ms","table":"default.persontable","extraInfo":{"bad_record_path":"","streaming":"false","local_dictionary_enable":"true","external":"false","sort_columns":"","comment":""}}
2019-01-28 15:59:10 AUDIT audit:72 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"LOAD DATA OVERWRITE","opId":"8448118681915381","opStatus":"START"}
2019-01-28 15:59:10 ERROR CarbonDataRDDFactory$:1108 - load data frame failed
java.io.IOException: Failed to create local dir in /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36/23.
	at org.apache.spark.storage.DiskBlockManager.getFile(DiskBlockManager.scala:70)
	at org.apache.spark.storage.DiskStore.remove(DiskStore.scala:135)
	at org.apache.spark.storage.BlockManager.removeBlockInternal(BlockManager.scala:1457)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:991)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1029)
	at org.apache.spark.storage.BlockManager.putIterator(BlockManager.scala:792)
	at org.apache.spark.storage.BlockManager.putSingle(BlockManager.scala:1350)
	at org.apache.spark.broadcast.TorrentBroadcast.writeBlocks(TorrentBroadcast.scala:122)
	at org.apache.spark.broadcast.TorrentBroadcast.<init>(TorrentBroadcast.scala:88)
	at org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast(TorrentBroadcastFactory.scala:34)
	at org.apache.spark.broadcast.BroadcastManager.newBroadcast(BroadcastManager.scala:56)
	at org.apache.spark.SparkContext.broadcast(SparkContext.scala:1488)
	at org.apache.spark.sql.util.SparkSQLUtil$.broadCastHadoopConf(SparkSQLUtil.scala:115)
	at org.apache.carbondata.spark.rdd.CarbonRDD.<init>(CarbonRDD.scala:56)
	at org.apache.spark.rdd.DataLoadCoalescedRDD.<init>(DataLoadCoalescedRDD.scala:34)
	at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadDataFrame(CarbonDataRDDFactory.scala:1097)
	at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:364)
	at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:630)
	at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:357)
	at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
	at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
	at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
	at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
	at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
	at org.apache.spark.sql.CarbonDataFrameWriter.loadDataFrame(CarbonDataFrameWriter.scala:62)
	at org.apache.spark.sql.CarbonDataFrameWriter.writeToCarbonFile(CarbonDataFrameWriter.scala:46)
	at org.apache.spark.sql.CarbonDataFrameWriter.saveAsCarbonFile(CarbonDataFrameWriter.scala:37)
	at org.apache.spark.sql.CarbonSource.createRelation(CarbonSource.scala:113)
	at org.apache.spark.sql.execution.datasources.DataSource.write(DataSource.scala:469)
	at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:50)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:116)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:92)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:92)
	at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:609)
	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:233)
	at org.apache.carbondata.examples.LuceneDataMapExample$.exampleBody(LuceneDataMapExample.scala:54)
	at org.apache.carbondata.examplesCI.RunExamples$$anonfun$17.apply$mcV$sp(RunExamples.scala:118)
	at org.apache.carbondata.examplesCI.RunExamples$$anonfun$17.apply(RunExamples.scala:118)
	at org.apache.carbondata.examplesCI.RunExamples$$anonfun$17.apply(RunExamples.scala:118)
	at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
	at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
	at org.apache.spark.sql.test.util.CarbonFunSuite.withFixture(CarbonFunSuite.scala:41)
	at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
	at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
	at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
	at org.scalatest.Suite$class.run(Suite.scala:1424)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
	at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
	at org.apache.carbondata.examplesCI.RunExamples.org$scalatest$BeforeAndAfterAll$$super$run(RunExamples.scala:35)
	at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
	at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
	at org.apache.carbondata.examplesCI.RunExamples.run(RunExamples.scala:35)
	at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
	at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
	at org.scalatest.Suite$class.run(Suite.scala:1421)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
	at org.scalatest.tools.Runner$.main(Runner.scala:860)
	at org.scalatest.tools.Runner.main(Runner.scala)
2019-01-28 15:59:10 ERROR CarbonDataRDDFactory$:420 - java.io.IOException: Failed to create local dir in /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36/23.
2019-01-28 15:59:10 ERROR CarbonLoadDataCommand:391 - java.lang.Exception: DataLoad failure
2019-01-28 15:59:10 ERROR CarbonLoadDataCommand:166 - Got exception java.lang.Exception: DataLoad failure when processing data. But this command does not support undo yet, skipping the undo part.
2019-01-28 15:59:10 AUDIT audit:93 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"LOAD DATA OVERWRITE","opId":"8448118681915381","opStatus":"FAILED","opTime":"244 ms","table":"default.personTable","extraInfo":{"Exception":"java.lang.Exception","Message":"DataLoad failure"}}
- LuceneDataMapExample *** FAILED ***
  java.lang.Exception: DataLoad failure
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:483)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:630)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:357)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
  at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
  at org.apache.spark.sql.CarbonDataFrameWriter.loadDataFrame(CarbonDataFrameWriter.scala:62)
  at org.apache.spark.sql.CarbonDataFrameWriter.writeToCarbonFile(CarbonDataFrameWriter.scala:46)
  ...
2019-01-28 15:59:10 AUDIT audit:72 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448118962718412","opStatus":"START"}
2019-01-28 15:59:10 AUDIT audit:93 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448118962718412","opStatus":"SUCCESS","opTime":"89 ms","table":"default.origin_table","extraInfo":{"bad_record_path":"","local_dictionary_enable":"true","external":"false","sort_columns":"","comment":""}}
2019-01-28 15:59:10 AUDIT audit:72 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"LOAD DATA","opId":"8448119060460784","opStatus":"START"}
2019-01-28 15:59:10 ERROR CarbonDataRDDFactory$:420 - java.io.IOException: Failed to create local dir in /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36/24.
2019-01-28 15:59:10 ERROR CarbonLoadDataCommand:391 - java.lang.Exception: DataLoad failure
2019-01-28 15:59:10 ERROR CarbonLoadDataCommand:166 - Got exception java.lang.Exception: DataLoad failure when processing data. But this command does not support undo yet, skipping the undo part.
2019-01-28 15:59:10 AUDIT audit:93 - {"time":"January 28, 2019 7:59:10 AM PST","username":"jenkins","opName":"LOAD DATA","opId":"8448119060460784","opStatus":"FAILED","opTime":"44 ms","table":"default.origin_table","extraInfo":{"Exception":"java.lang.Exception","Message":"DataLoad failure"}}
- ExternalTableExample *** FAILED ***
  java.lang.Exception: DataLoad failure
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:483)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:630)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:357)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
  at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  ...

Data:
0	robot0	0	0	9223372036854775807	0.0	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
1	robot1	1	1	9223372036854775806	0.5	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
2	robot2	2	2	9223372036854775805	1.0	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
3	robot3	3	3	9223372036854775804	1.5	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
4	robot4	4	4	9223372036854775803	2.0	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
5	robot5	5	5	9223372036854775802	2.5	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
6	robot6	6	6	9223372036854775801	3.0	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
7	robot7	7	7	9223372036854775800	3.5	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
8	robot8	8	8	9223372036854775799	4.0	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 
9	robot9	9	9	9223372036854775798	4.5	true	2019-03-01	2019-02-12 03:03:34.0	12.35	varchar	
Hello World From Carbon 

Data:
0	robot0	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	0	0	9223372036854775807	0.0	true	12.35	
1	robot1	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	1	1	9223372036854775806	0.5	true	12.35	
2	robot2	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	2	2	9223372036854775805	1.0	true	12.35	
3	robot3	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	3	3	9223372036854775804	1.5	true	12.35	
4	robot4	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	4	4	9223372036854775803	2.0	true	12.35	
5	robot5	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	5	5	9223372036854775802	2.5	true	12.35	
6	robot6	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	6	6	9223372036854775801	3.0	true	12.35	
7	robot7	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	7	7	9223372036854775800	3.5	true	12.35	
8	robot8	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	8	8	9223372036854775799	4.0	true	12.35	
9	robot9	2019-03-01	2019-02-12 03:03:34.0	varchar	Hello World From Carbon 	9	9	9223372036854775798	4.5	true	12.35	
- CarbonReaderExample
2019-01-28 15:59:11 AUDIT audit:72 - {"time":"January 28, 2019 7:59:11 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448119882235366","opStatus":"START"}
2019-01-28 15:59:11 AUDIT audit:93 - {"time":"January 28, 2019 7:59:11 AM PST","username":"jenkins","opName":"CREATE TABLE","opId":"8448119882235366","opStatus":"SUCCESS","opTime":"87 ms","table":"default.hive_carbon_example","extraInfo":{"bad_record_path":"","local_dictionary_enable":"true","external":"false","sort_columns":"","comment":""}}
2019-01-28 15:59:11 AUDIT audit:72 - {"time":"January 28, 2019 7:59:11 AM PST","username":"jenkins","opName":"LOAD DATA","opId":"8448119997530309","opStatus":"START"}
2019-01-28 15:59:11 ERROR CarbonDataRDDFactory$:420 - java.io.IOException: Failed to create local dir in /tmp/blockmgr-ce74bf3c-149c-4744-8276-e7a76b0f5b36/25.
2019-01-28 15:59:11 ERROR CarbonLoadDataCommand:391 - java.lang.Exception: DataLoad failure
2019-01-28 15:59:11 ERROR CarbonLoadDataCommand:166 - Got exception java.lang.Exception: DataLoad failure when processing data. But this command does not support undo yet, skipping the undo part.
2019-01-28 15:59:11 AUDIT audit:93 - {"time":"January 28, 2019 7:59:11 AM PST","username":"jenkins","opName":"LOAD DATA","opId":"8448119997530309","opStatus":"FAILED","opTime":"39 ms","table":"default.hive_carbon_example","extraInfo":{"Exception":"java.lang.Exception","Message":"DataLoad failure"}}
- HiveExample *** FAILED ***
  java.lang.Exception: DataLoad failure
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:483)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:630)
  at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:357)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:148)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:145)
  at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:141)
  at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:145)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  ...
Run completed in 59 seconds, 407 milliseconds.
Total number of tests run: 20
Suites: completed 2, aborted 0
Tests: succeeded 1, failed 19, canceled 0, ignored 0, pending 0
*** 19 TESTS FAILED ***
[JENKINS] Recording test results
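
[Editor's note] The failures visible in this (truncated) log are all the same "Failed to create local dir" IOException surfacing through different examples, so the 19 failed tests look like one environment issue on the Jenkins agent rather than independent regressions. As a hedged sketch only (not the project's actual setup; the directory name is illustrative), a CI session could point Spark's scratch space at a workspace-local directory instead of the agent's /tmp via the standard spark.local.dir property:

    import org.apache.spark.sql.SparkSession

    // spark.local.dir is a standard Spark property; the path below is only an example.
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("CarbonExamplesCI")
      .config("spark.local.dir", "./target/spark-local-dir")
      .getOrCreate()
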

Jenkins build is back to normal : carbondata-master-spark-2.2 » Apache CarbonData :: Examples #1509

Posted by Apache Jenkins Server <je...@builds.apache.org>.
See <https://builds.apache.org/job/carbondata-master-spark-2.2/org.apache.carbondata$carbondata-examples/1509/display/redirect>