Posted to commits@carbondata.apache.org by ra...@apache.org on 2016/08/01 10:05:20 UTC

[22/47] incubator-carbondata git commit: Fix some code style (#861)

Fix some code style (#861)

Fix code styles

Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/ad1c9859
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/ad1c9859
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/ad1c9859

Branch: refs/heads/master
Commit: ad1c9859658cf9941bc444eacee0f8fe66cfff67
Parents: 4b6314c
Author: Zhangshunyu <zh...@huawei.com>
Authored: Mon Jul 25 18:34:47 2016 +0800
Committer: sujith71955 <su...@gmail.com>
Committed: Mon Jul 25 16:04:47 2016 +0530

----------------------------------------------------------------------
 .../org/apache/spark/sql/CarbonSqlParser.scala  |  1 -
 .../execution/command/carbonTableSchema.scala   |  7 ++-----
 .../spark/rdd/CarbonDataLoadRDD.scala           |  1 -
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  | 21 ++++++--------------
 4 files changed, 8 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/ad1c9859/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index 79471a9..e222bec 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -17,7 +17,6 @@
 
 package org.apache.spark.sql
 
-import java.nio.charset.Charset
 import java.util
 import java.util.regex.{Matcher, Pattern}
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/ad1c9859/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 30c022e..5da01ac 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.command
 import java.io.File
 import java.text.SimpleDateFormat
 import java.util
-import java.util.{Date, UUID}
+import java.util.UUID
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
@@ -29,10 +29,7 @@ import scala.util.Random
 
 import org.apache.spark.SparkEnv
 import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Cast, Literal}
-import org.apache.spark.sql.catalyst.util.DateTimeUtils
-import org.apache.spark.sql.catalyst.util.DateTimeUtils.SQLTimestamp
 import org.apache.spark.sql.execution.{RunnableCommand, SparkPlan}
 import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.sql.types.TimestampType
@@ -59,7 +56,7 @@ import org.carbondata.spark.exception.MalformedCarbonCommandException
 import org.carbondata.spark.load._
 import org.carbondata.spark.partition.api.impl.QueryPartitionHelper
 import org.carbondata.spark.rdd.CarbonDataRDDFactory
-import org.carbondata.spark.util.{CarbonScalaUtil, CommonUtil, GlobalDictionaryUtil}
+import org.carbondata.spark.util.{CarbonScalaUtil, GlobalDictionaryUtil}
 import org.carbondata.spark.CarbonSparkFactory
 
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/ad1c9859/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataLoadRDD.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataLoadRDD.scala
index 60a3a8a..87f7885 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonDataLoadRDD.scala
@@ -180,7 +180,6 @@ class CarbonDataLoadRDD[K, V](
         }
         if (storeLocation == null) {
           storeLocation = System.getProperty("java.io.tmpdir")
-          // storeLocation = storeLocation + "/carbonstore/" + System.nanoTime()
         }
         storeLocation = storeLocation + '/' + System.nanoTime() + '/' + theSplit.index
         dataloadStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
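
The line removed above was commented-out dead code: the statement that
follows it already appends System.nanoTime() and the split index, so every
load task gets its own temporary store directory. A minimal Scala sketch of
that path construction (storeLocationFor and splitIndex are illustrative
names, not CarbonData's API):

    // Compose a per-task temp store path the same way the code above does.
    def storeLocationFor(splitIndex: Int): String = {
      val base = Option(System.getProperty("java.io.tmpdir")).getOrElse("/tmp")
      // nanoTime keeps concurrent loads from writing into the same directory
      base + '/' + System.nanoTime() + '/' + splitIndex
    }

    println(storeLocationFor(0)) // e.g. /tmp/123456789012345/0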

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/ad1c9859/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonMergerRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonMergerRDD.scala
index e2dc900..0129a2c 100644
--- a/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark/src/main/scala/org/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -188,7 +188,7 @@ class CarbonMergerRDD[K, V](
 
     var noOfBlocks = 0
 
-    var taskInfoList = new util.ArrayList[Distributable]
+    val taskInfoList = new util.ArrayList[Distributable]
 
     // for each valid segment.
     for (eachSeg <- carbonMergerMapping.validSegments) {
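
The var-to-val change above is the idiomatic fix: taskInfoList is only ever
mutated through add and is never reassigned. In Scala, val pins the
reference while the java.util.ArrayList behind it stays mutable, e.g.:

    import java.util

    val taskInfoList = new util.ArrayList[String]
    taskInfoList.add("task-0")  // fine: the collection itself is mutable
    // taskInfoList = new util.ArrayList[String]  // does not compile: val cannot be reassigned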
@@ -227,15 +227,13 @@ class CarbonMergerRDD[K, V](
       )
 
       noOfBlocks += blocksOfOneSegment.size
-      var index = 0
-       taskIdMapping.asScala.foreach(
+      taskIdMapping.asScala.foreach(
         entry =>
           taskInfoList.add(new TableTaskInfo(entry._1, entry._2).asInstanceOf[Distributable])
       )
     }
     // send complete list of blocks to the mapping util.
-      nodeMapping =
-        CarbonLoaderUtil.nodeBlockMapping(taskInfoList, -1)
+    nodeMapping = CarbonLoaderUtil.nodeBlockMapping(taskInfoList, -1)
 
     val confExecutors = confExecutorsTemp.toInt
     val requiredExecutors = if (nodeMapping.size > confExecutors) {
@@ -273,13 +271,7 @@ class CarbonMergerRDD[K, V](
           .add(new NodeInfo(blocksPerNode.getTaskId, blocksPerNode.getTableBlockInfoList.size))
        })
       if (list.size() != 0) {
-           result
-             .add(new CarbonSparkPartition(id,
-               i,
-               Seq(entry._1).toArray,
-               list
-             )
-             )
+           result.add(new CarbonSparkPartition(id, i, Seq(entry._1).toArray, list))
            i += 1
          }
     }
@@ -287,14 +279,13 @@ class CarbonMergerRDD[K, V](
     // print the node info along with task and number of blocks for the task.
 
     nodeTaskBlocksMap.asScala.foreach((entry : (String, List[NodeInfo])) => {
-      logInfo(s"for the node $entry._1" )
+      logInfo(s"for the node ${entry._1}" )
       for (elem <- entry._2.asScala) {
         logInfo("Task ID is " + elem.TaskId + "no. of blocks is " + elem.noOfBlocks)
       }
     } )
 
-    // val noOfBlocks = blockList.size
-    val noOfNodes = nodes.size
+    val noOfNodes = nodes.length
     val noOfTasks = result.size
     logInfo(s"Identified  no.of.Blocks: $noOfBlocks,"
             + s"parallelism: $defaultParallelism , no.of.nodes: $noOfNodes, no.of.tasks: $noOfTasks"