You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ku...@apache.org on 2018/10/25 14:13:37 UTC

carbondata git commit: [CARBONDATA-3025]Added CLI enhancements

Repository: carbondata
Updated Branches:
  refs/heads/master 2c50ca5cf -> e0baa9b9f


[CARBONDATA-3025]Added CLI enhancements

Enhancement in CLI tool
1. a new option called "-v" is added to get the written_by and version details in addition to existing options

2. SQL support is added for CLI
This is introduced so that the user can get the details directly in beeline, without needing to execute the CLI tool separately. Currently the command is as below; based on comments we can change this DDL
Show summary for table <table_name> options('command'='-cmd,summary,-a,-p,-b');

3. When we get details for column statistics, we currently get only the Min and Max percentage; if we add the actual min and max values, it will be helpful for the developer

4. Currently the CLI tool gets blocklet details for all the block files, so if there are a large number of carbondata files, it takes a long time to get the details.
A limit is therefore added: by default, when the option "-b" is given, only 4 outputs will be shown; to get more, pass an option value for "-b" as the limit number.
Example: "-b 30" =>> the limit will be increased to 30

5. A new option called "-B" is added, which takes a mandatory option value specifying a block path. This is added to get block details such as the number of blocklets, number of pages, rows, and size

Example: "-B /home/ss/ss/part-0-0_batchno0-0-0-1539782855178.carbondata"

This closes #2830


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e0baa9b9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e0baa9b9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e0baa9b9

Branch: refs/heads/master
Commit: e0baa9b9f5e983f3ff13b70025c900761f09643c
Parents: 2c50ca5
Author: akashrn5 <ak...@gmail.com>
Authored: Thu Oct 18 15:19:25 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Oct 25 19:41:47 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       |   4 +
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |   8 +
 integration/spark2/pom.xml                      |   5 +
 .../management/CarbonShowSummaryCommand.scala   |  62 ++++++
 .../sql/parser/CarbonSpark2SqlParser.scala      |  19 +-
 .../org/apache/carbondata/tool/CarbonCli.java   |  61 +++++-
 .../org/apache/carbondata/tool/DataFile.java    |   5 +
 .../org/apache/carbondata/tool/DataSummary.java | 209 +++++++++++++------
 .../apache/carbondata/tool/FileCollector.java   |  33 +--
 .../apache/carbondata/tool/ScanBenchmark.java   |  18 +-
 .../apache/carbondata/tool/ShardPrinter.java    |  28 +--
 .../apache/carbondata/tool/TableFormatter.java  |  61 ++++++
 .../apache/carbondata/tool/TablePrinter.java    |  59 ------
 .../apache/carbondata/tool/CarbonCliTest.java   |  68 +++++-
 14 files changed, 467 insertions(+), 173 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index d2e33e2..2036ed2 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -389,6 +389,10 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
          |'carbondata' LOCATION
          |'$writerPath' """.stripMargin)
 
+    val output = sql("show summary for table sdkOutputTable options('command'='-cmd,summary,-p,-a,-v,-c,age')").collect()
+
+    assert(output.toList.contains(Row("written_by                       Version         ")))
+
     checkExistence(sql("describe formatted sdkOutputTable"), true, "age,name")
 
     checkExistence(sql("describe formatted sdkOutputTable"), true, writerPath)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index af466d4..36be655 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -188,6 +188,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val STREAM = carbonKeyWord("STREAM")
   protected val STREAMS = carbonKeyWord("STREAMS")
   protected val STMPROPERTIES = carbonKeyWord("STMPROPERTIES")
+  protected val SUMMARY = carbonKeyWord("SUMMARY")
 
   protected val doubleQuotedString = "\"([^\"]+)\"".r
   protected val singleQuotedString = "'([^']+)'".r
@@ -1141,6 +1142,13 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case _ => ("", "")
     }
 
+  protected lazy val summaryOptions: Parser[(String, String)] =
+    (stringLit <~ "=") ~ stringLit ^^ {
+      case opt ~ optvalue => (opt.trim.toLowerCase(), optvalue)
+      case _ => ("", "")
+    }
+
+
   protected lazy val partitions: Parser[(String, Option[String])] =
     (ident <~ "=".?) ~ stringLit.? ^^ {
       case opt ~ optvalue => (opt.trim, optvalue)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/integration/spark2/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark2/pom.xml b/integration/spark2/pom.xml
index 771fdf1..f874906 100644
--- a/integration/spark2/pom.xml
+++ b/integration/spark2/pom.xml
@@ -42,6 +42,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
+      <artifactId>carbondata-cli</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-store-sdk</artifactId>
       <version>${project.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSummaryCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSummaryCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSummaryCommand.scala
new file mode 100644
index 0000000..461f31f
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSummaryCommand.scala
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.management
+
+import java.util
+
+import scala.collection.JavaConverters._
+
+import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.execution.command.{Checker, DataCommand}
+import org.apache.spark.sql.types.StringType
+
+import org.apache.carbondata.tool.CarbonCli
+
+/**
+ * Show summary command class which is integrated to cli and sql support is provided via this class
+ * @param databaseNameOp
+ * @param tableName
+ * @param commandOptions
+ */
+case class CarbonShowSummaryCommand(
+    databaseNameOp: Option[String],
+    tableName: String,
+    commandOptions: Map[String, String])
+  extends DataCommand {
+
+  override def output: Seq[Attribute] = {
+      Seq(AttributeReference("Table Summary", StringType, nullable = false)())
+  }
+
+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+    Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
+    val commandArgs: Seq[String] = commandOptions("command").split(",")
+    val finalCommands = commandArgs.collect {
+      case a if a.trim.equalsIgnoreCase("-p") =>
+        Seq(a, carbonTable.getTablePath)
+      case x => Seq(x.trim)
+    }.flatten
+    val summaryOutput = new util.ArrayList[String]()
+    CarbonCli.run(finalCommands.toArray, summaryOutput)
+    summaryOutput.asScala.map(x =>
+      Row(x)
+    )
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 5165526..5427168 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -78,7 +78,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
 
   protected lazy val startCommand: Parser[LogicalPlan] =
     loadManagement | showLoads | alterTable | restructure | updateTable | deleteRecords |
-    alterPartition | datamapManagement | alterTableFinishStreaming | stream
+    alterPartition | datamapManagement | alterTableFinishStreaming | stream | cli
 
   protected lazy val loadManagement: Parser[LogicalPlan] =
     deleteLoadsByID | deleteLoadsByLoadDate | cleanFiles | loadDataNew
@@ -495,6 +495,23 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           showHistory.isDefined)
     }
 
+
+  protected lazy val cli: Parser[LogicalPlan] =
+    (SHOW ~> SUMMARY ~> FOR ~> TABLE) ~> (ident <~ ".").? ~ ident ~
+    (OPTIONS ~> "(" ~> repsep(summaryOptions, ",") <~ ")").? <~
+    opt(";") ^^ {
+      case databaseName ~ tableName ~ commandList =>
+        var commandOptions: Map[String, String] = null
+        if (commandList.isDefined) {
+          commandOptions = commandList.getOrElse(List.empty[(String, String)]).toMap
+        }
+        CarbonShowSummaryCommand(
+          convertDbNameToLowerCase(databaseName),
+          tableName.toLowerCase(),
+          commandOptions.map { case (key, value) => key.toLowerCase -> value })
+    }
+
+
   protected lazy val alterTableModifyDataType: Parser[LogicalPlan] =
     ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ CHANGE ~ ident ~ ident ~
     ident ~ opt("(" ~> rep1sep(valueOptions, ",") <~ ")") <~ opt(";") ^^ {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/CarbonCli.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/CarbonCli.java b/tools/cli/src/main/java/org/apache/carbondata/tool/CarbonCli.java
index f1baa92..11553a6 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/CarbonCli.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/CarbonCli.java
@@ -19,6 +19,8 @@ package org.apache.carbondata.tool;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.annotations.InterfaceStability;
@@ -40,6 +42,13 @@ import org.apache.commons.cli.PosixParser;
 @InterfaceStability.Unstable
 public class CarbonCli {
 
+  // List to collect all the outputs of option details
+  private static List<String> outPuts;
+
+  // a boolean variable to decide whether to print the output in console or return the list,
+  // by default true, and it will be set to false if the cli is trigerred via sql command
+  private static boolean isPrintInConsole = true;
+
   private static Options buildOptions() {
     Option help = new Option("h", "help", false,"print this message");
     Option path = OptionBuilder.withArgName("path")
@@ -64,7 +73,6 @@ public class CarbonCli {
     Option schema = new Option("s", "schema",false, "print the schema");
     Option segment = new Option("m", "showSegment", false, "print segment information");
     Option tblProperties = new Option("t", "tblProperties", false, "print table properties");
-    Option detail = new Option("b", "blocklet", false, "print blocklet size detail");
     Option columnMeta = new Option("k", "columnChunkMeta", false, "print column chunk meta");
     Option columnName = OptionBuilder
         .withArgName("column name")
@@ -73,6 +81,15 @@ public class CarbonCli {
         .withLongOpt("column")
         .create("c");
 
+    Option blockletDetail = OptionBuilder.withArgName("limitSize").hasOptionalArg()
+        .withDescription("print blocklet size detail").withLongOpt("limitSize")
+        .create("b");
+
+    Option blockLevelDetail = OptionBuilder.withArgName("blockDetail").hasArg()
+        .withDescription("print block details").withLongOpt("blockDetail")
+        .create("B");
+
+    Option version = new Option("v", "version", false, "print version details of carbondata file");
     Options options = new Options();
     options.addOption(help);
     options.addOption(path);
@@ -82,9 +99,11 @@ public class CarbonCli {
     options.addOption(schema);
     options.addOption(segment);
     options.addOption(tblProperties);
-    options.addOption(detail);
+    options.addOption(blockletDetail);
     options.addOption(columnMeta);
     options.addOption(columnName);
+    options.addOption(version);
+    options.addOption(blockLevelDetail);
     return options;
   }
 
@@ -92,7 +111,24 @@ public class CarbonCli {
     run(args, System.out);
   }
 
-  static void run(String[] args, PrintStream out) {
+  public static void run(String[] args, ArrayList<String> e) {
+    // this boolean to check whether to print in console or not
+    isPrintInConsole = false;
+    outPuts = e;
+    Options options = buildOptions();
+    CommandLineParser parser = new PosixParser();
+
+    CommandLine line;
+    try {
+      line = parser.parse(options, args);
+    } catch (ParseException exp) {
+      throw new RuntimeException("Parsing failed. Reason: " + exp.getMessage());
+    }
+
+    runCli(System.out, options, line);
+  }
+
+  public static void run(String[] args, PrintStream out) {
     Options options = buildOptions();
     CommandLineParser parser = new PosixParser();
 
@@ -104,6 +140,13 @@ public class CarbonCli {
       return;
     }
 
+    runCli(out, options, line);
+  }
+
+  private static void  runCli(PrintStream out, Options options, CommandLine line) {
+    if (outPuts == null) {
+      outPuts = new ArrayList<>();
+    }
     if (line.hasOption("h")) {
       printHelp(options);
       return;
@@ -113,22 +156,28 @@ public class CarbonCli {
     if (line.hasOption("p")) {
       path = line.getOptionValue("path");
     }
-    out.println("Input Folder: " + path);
+    outPuts.add("Input Folder: " + path);
 
     String cmd = line.getOptionValue("cmd");
     Command command;
     if (cmd.equalsIgnoreCase("summary")) {
-      command = new DataSummary(path, out);
+      command = new DataSummary(path, outPuts);
     } else if (cmd.equalsIgnoreCase("benchmark")) {
-      command = new ScanBenchmark(path, out);
+      command = new ScanBenchmark(path, outPuts);
     } else {
       out.println("command " + cmd + " is not supported");
+      outPuts.add("command " + cmd + " is not supported");
       printHelp(options);
       return;
     }
 
     try {
       command.run(line);
+      if (isPrintInConsole) {
+        for (String output : outPuts) {
+          out.println(output);
+        }
+      }
       out.flush();
     } catch (IOException | MemoryException e) {
       e.printStackTrace();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/DataFile.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/DataFile.java b/tools/cli/src/main/java/org/apache/carbondata/tool/DataFile.java
index 039401e..c8fdc9e 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/DataFile.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/DataFile.java
@@ -317,6 +317,10 @@ class DataFile {
     // min/max stats of this column chunk
     byte[] min, max;
 
+    // to set whether min max is present for the column chunck, as we may not right min max after
+    // specific size
+    boolean isMinMaxPresent;
+
     // percentage of min/max comparing to min/max scope collected in all blocklets
     // they are set after calculation in DataSummary
     double minPercentage, maxPercentage;
@@ -335,6 +339,7 @@ class DataFile {
       this.column = column;
       min = index.min_max_index.min_values.get(columnIndex).array();
       max = index.min_max_index.max_values.get(columnIndex).array();
+      isMinMaxPresent = index.min_max_index.min_max_presence.get(columnIndex);
 
       // read the column chunk metadata: DataChunk3
       ByteBuffer buffer = fileReader.readByteBuffer(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/DataSummary.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/DataSummary.java b/tools/cli/src/main/java/org/apache/carbondata/tool/DataSummary.java
index 5f1fb68..4a8c85e 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/DataSummary.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/DataSummary.java
@@ -17,8 +17,8 @@
 
 package org.apache.carbondata.tool;
 
+import java.io.File;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.nio.charset.Charset;
 import java.util.Collection;
 import java.util.HashMap;
@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.carbondata.common.Strings;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.memory.MemoryException;
@@ -36,14 +37,15 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.reader.CarbonHeaderReader;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
+import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.format.BlockletInfo3;
 import org.apache.carbondata.format.DataChunk2;
 import org.apache.carbondata.format.DataChunk3;
 import org.apache.carbondata.format.FileFooter3;
 import org.apache.carbondata.format.FileHeader;
 import org.apache.carbondata.format.TableInfo;
-
 import static org.apache.carbondata.core.constants.CarbonCommonConstants.DEFAULT_CHARSET;
 
 import org.apache.commons.cli.CommandLine;
@@ -53,19 +55,19 @@ import org.apache.commons.cli.CommandLine;
  */
 class DataSummary implements Command {
   private String dataFolder;
-  private PrintStream out;
+  private List<String> outPuts;
 
   // file path mapping to file object
   private LinkedHashMap<String, DataFile> dataFiles;
 
-  DataSummary(String dataFolder, PrintStream out) {
+  DataSummary(String dataFolder, List<String> outPuts) {
     this.dataFolder = dataFolder;
-    this.out = out;
+    this.outPuts = outPuts;
   }
 
   @Override
   public void run(CommandLine line) throws IOException, MemoryException {
-    FileCollector collector = new FileCollector(out);
+    FileCollector collector = new FileCollector(outPuts);
     collector.collectFiles(dataFolder);
     collector.printBasicStats();
     if (collector.getNumDataFiles() == 0) {
@@ -78,47 +80,59 @@ class DataSummary implements Command {
     }
     if (line.hasOption("s") || printAll) {
       if (dataFiles.size() > 0) {
-        printSchema(dataFiles.entrySet().iterator().next().getValue());
+        collectSchemaDetails(dataFiles.entrySet().iterator().next().getValue());
       }
     }
     if (line.hasOption("m") || printAll) {
-      printSegments(collector.getTableStatusFile());
+      collectSegmentsDetails(collector.getTableStatusFile());
     }
     if (line.hasOption("t") || printAll) {
-      printTableProperties(collector.getSchemaFile());
+      collectTableProperties(collector.getSchemaFile());
     }
     if (line.hasOption("b") || printAll) {
-      printBlockletDetail();
+      String limitSize = line.getOptionValue("b");
+      if (limitSize == null) {
+        // by default we can limit the output to two shards and user can increase this limit
+        limitSize = "2";
+      }
+      collectBlockletDetail(Integer.parseInt(limitSize));
+    }
+    if (line.hasOption("v") || printAll) {
+      collectVersionDetails();
+    }
+    if (line.hasOption("B")) {
+      String blockFileName = line.getOptionValue("B");
+      collectBlockDetails(blockFileName);
     }
     if (line.hasOption("c")) {
       String columName = line.getOptionValue("c");
       printColumnStats(columName);
       if (line.hasOption("k")) {
-        printColumnChunkMeta(columName);
+        collectColumnChunkMeta(columName);
       }
     }
   }
 
-  private void printSchema(DataFile dataFile) throws IOException {
+  private void collectSchemaDetails(DataFile dataFile) throws IOException {
     CarbonFile file = FileFactory.getCarbonFile(dataFile.getFilePath());
-    out.println();
-    out.println("## Schema");
-    out.println(String.format("schema in %s", file.getName()));
+    outPuts.add("");
+    outPuts.add("## Schema");
+    outPuts.add(String.format("schema in %s", file.getName()));
     CarbonHeaderReader reader = new CarbonHeaderReader(file.getPath());
     FileHeader header = reader.readHeader();
-    out.println("version: V" + header.version);
-    out.println("timestamp: " + new java.sql.Timestamp(header.time_stamp));
+    outPuts.add("version: V" + header.version);
+    outPuts.add("timestamp: " + new java.sql.Timestamp(header.time_stamp));
     List<ColumnSchema> columns = reader.readSchema();
-    TablePrinter printer = new TablePrinter(
+    TableFormatter tableFormatter = new TableFormatter(
         new String[]{"Column Name", "Data Type", "Column Type",
-            "SortColumn", "Encoding", "Ordinal", "Id"});
+            "SortColumn", "Encoding", "Ordinal", "Id"}, outPuts);
     for (ColumnSchema column : columns) {
       String shortColumnId = "NA";
       if (column.getColumnUniqueId() != null && column.getColumnUniqueId().length() > 4) {
         shortColumnId = "*" +
             column.getColumnUniqueId().substring(column.getColumnUniqueId().length() - 4);
       }
-      printer.addRow(new String[]{
+      tableFormatter.addRow(new String[]{
           column.getColumnName(),
           column.getDataType().getName(),
           column.isDimensionColumn() ? "dimension" : "measure",
@@ -128,19 +142,19 @@ class DataSummary implements Command {
           shortColumnId
       });
     }
-    printer.printFormatted(out);
+    tableFormatter.printFormatted();
   }
 
-  private void printSegments(CarbonFile tableStatusFile) throws IOException {
-    out.println();
-    out.println("## Segment");
+  private void collectSegmentsDetails(CarbonFile tableStatusFile) throws IOException {
+    outPuts.add("");
+    outPuts.add("## Segment");
     if (tableStatusFile != null) {
       // first collect all information in memory then print a formatted table
       LoadMetadataDetails[] segments =
           SegmentStatusManager.readTableStatusFile(tableStatusFile.getPath());
-      TablePrinter printer = new TablePrinter(
+      TableFormatter tableFormatter = new TableFormatter(
           new String[]{"SegmentID", "Status", "Load Start", "Load End",
-              "Merged To", "Format", "Data Size", "Index Size"});
+              "Merged To", "Format", "Data Size", "Index Size"}, outPuts);
       for (LoadMetadataDetails segment : segments) {
         String dataSize, indexSize;
         if (segment.getDataSize() == null) {
@@ -153,7 +167,7 @@ class DataSummary implements Command {
         } else {
           indexSize = Strings.formatSize(Long.parseLong(segment.getIndexSize()));
         }
-        printer.addRow(new String[]{
+        tableFormatter.addRow(new String[]{
             segment.getLoadName(),
             segment.getSegmentStatus().toString(),
             new java.sql.Date(segment.getLoadStartTime()).toString(),
@@ -164,39 +178,38 @@ class DataSummary implements Command {
             indexSize}
         );
       }
-      printer.printFormatted(out);
+      tableFormatter.printFormatted();
     } else {
-      out.println("table status file not found");
+      outPuts.add("table status file not found");
     }
   }
 
-  private void printTableProperties(CarbonFile schemaFile) throws IOException {
-    out.println();
-    out.println("## Table Properties");
+  private void collectTableProperties(CarbonFile schemaFile) throws IOException {
+    outPuts.add("");
+    outPuts.add("## Table Properties");
     if (schemaFile != null) {
       TableInfo thriftTableInfo = CarbonUtil.readSchemaFile(schemaFile.getPath());
       Map<String, String> tblProperties = thriftTableInfo.fact_table.tableProperties;
-      TablePrinter printer = new TablePrinter(
-          new String[]{"Property Name", "Property Value"});
+      TableFormatter tableFormatter = new TableFormatter(
+          new String[]{"Property Name", "Property Value"}, outPuts);
       for (Map.Entry<String, String> entry : tblProperties.entrySet()) {
-        printer.addRow(new String[] {
+        tableFormatter.addRow(new String[] {
             String.format("'%s'", entry.getKey()),
             String.format("'%s'", entry.getValue())
         });
       }
-      printer.printFormatted(out);
+      tableFormatter.printFormatted();
     } else {
-      out.println("schema file not found");
+      outPuts.add("schema file not found");
     }
   }
 
-  private void printBlockletDetail() {
-    out.println();
-    out.println("## Block Detail");
+  private void collectBlockletDetail(int limitSize) {
+    outPuts.add("");
+    outPuts.add("## Block Detail");
 
-    ShardPrinter printer = new ShardPrinter(new String[]{
-        "BLK", "BLKLT", "NumPages", "NumRows", "Size"
-    });
+    ShardPrinter printer =
+        new ShardPrinter(new String[] { "BLK", "BLKLT", "NumPages", "NumRows", "Size" }, outPuts);
 
     for (Map.Entry<String, DataFile> entry : dataFiles.entrySet()) {
       DataFile file = entry.getValue();
@@ -211,8 +224,50 @@ class DataSummary implements Command {
             Strings.formatSize(file.getBlockletSizeInBytes(blockletId))
         });
       }
+      limitSize--;
+      if (limitSize == 0) {
+        break;
+      }
+    }
+    printer.collectFormattedData();
+  }
+
+  private void collectBlockDetails(String blockFilePath) throws IOException {
+    outPuts.add("");
+    outPuts.add("## Filtered Block Details for: " + blockFilePath
+        .substring(blockFilePath.lastIndexOf(File.separator) + 1, blockFilePath.length()));
+    TableFormatter tableFormatter =
+        new TableFormatter(new String[] { "BLKLT", "NumPages", "NumRows", "Size" }, outPuts);
+    CarbonFile datafile = FileFactory.getCarbonFile(blockFilePath);
+    DataFile dataFile = new DataFile(datafile);
+    dataFile.collectAllMeta();
+    FileFooter3 footer = dataFile.getFooter();
+    for (int blockletId = 0; blockletId < footer.blocklet_info_list3.size(); blockletId++) {
+      BlockletInfo3 blocklet = footer.blocklet_info_list3.get(blockletId);
+      tableFormatter.addRow(new String[]{
+          String.valueOf(blockletId),
+          String.format("%,d", blocklet.number_number_of_pages),
+          String.format("%,d", blocklet.num_rows),
+          Strings.formatSize(dataFile.getBlockletSizeInBytes(blockletId))
+      });
+    }
+    tableFormatter.printFormatted();
+  }
+
+  private void collectVersionDetails() {
+    DataFile file = dataFiles.entrySet().iterator().next().getValue();
+    FileFooter3 footer = file.getFooter();
+    if (null != footer.getExtra_info()) {
+      outPuts.add("");
+      outPuts.add("## version Details");
+      TableFormatter tableFormatter =
+          new TableFormatter(new String[] { "written_by", "Version" }, outPuts);
+      tableFormatter.addRow(new String[] { String.format("%s",
+          footer.getExtra_info().get(CarbonCommonConstants.CARBON_WRITTEN_BY_FOOTER_INFO)),
+          String.format("%s",
+              footer.getExtra_info().get(CarbonCommonConstants.CARBON_WRITTEN_VERSION)) });
+      tableFormatter.printFormatted();
     }
-    printer.printFormatted(out);
   }
 
   private int getColumnIndex(String columnName) {
@@ -226,25 +281,57 @@ class DataSummary implements Command {
   private boolean collected = false;
 
   private void printColumnStats(String columnName) throws IOException, MemoryException {
-    out.println();
-    out.println("## Column Statistics for '" + columnName + "'");
+    outPuts.add("");
+    outPuts.add("## Column Statistics for '" + columnName + "'");
     collectStats(columnName);
 
     int columnIndex = getColumnIndex(columnName);
     String[] header = new String[]{"BLK", "BLKLT", "Meta Size", "Data Size",
-        "LocalDict", "DictEntries", "DictSize", "AvgPageSize", "Min%", "Max%"};
+        "LocalDict", "DictEntries", "DictSize", "AvgPageSize", "Min%", "Max%", "Min", "Max"};
 
-    ShardPrinter printer = new ShardPrinter(header);
+    ShardPrinter printer = new ShardPrinter(header, outPuts);
     for (Map.Entry<String, DataFile> entry : dataFiles.entrySet()) {
       DataFile file = entry.getValue();
       for (DataFile.Blocklet blocklet : file.getAllBlocklets()) {
-        String min, max;
+        String min, max, minPercent, maxPercent;
+        byte[] blockletMin = blocklet.getColumnChunk().min;
+        byte[] blockletMax = blocklet.getColumnChunk().max;
         if (blocklet.getColumnChunk().getDataType() == DataTypes.STRING) {
-          min = new String(blocklet.getColumnChunk().min, Charset.forName(DEFAULT_CHARSET));
-          max = new String(blocklet.getColumnChunk().max, Charset.forName(DEFAULT_CHARSET));
+          minPercent = "NA";
+          maxPercent = "NA";
+          // for complex types min max can be given as NA and for varchar where min max is not
+          // written, can give NA
+          if (blocklet.getColumnChunk().column.getColumnName().contains(".val") || blocklet
+              .getColumnChunk().column.getColumnName().contains(".") || !blocklet
+              .getColumnChunk().isMinMaxPresent) {
+            min = "NA";
+            max = "NA";
+          } else {
+            min = new String(blockletMin, Charset.forName(DEFAULT_CHARSET));
+            max = new String(blockletMax, Charset.forName(DEFAULT_CHARSET));
+          }
         } else {
-          min = String.format("%.1f", blocklet.getColumnChunk().getMinPercentage() * 100);
-          max = String.format("%.1f", blocklet.getColumnChunk().getMaxPercentage() * 100);
+          minPercent = String.format("%.1f", blocklet.getColumnChunk().getMinPercentage() * 100);
+          maxPercent = String.format("%.1f", blocklet.getColumnChunk().getMaxPercentage() * 100);
+          DataFile.ColumnChunk columnChunk = blocklet.columnChunk;
+          if (columnChunk.column.isDimensionColumn() && DataTypeUtil
+              .isPrimitiveColumn(columnChunk.column.getDataType())) {
+            min = DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(blockletMin,
+                columnChunk.column.getDataType()).toString();
+            max = DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(blockletMax,
+                columnChunk.column.getDataType()).toString();
+          } else {
+            if (blockletMin.length > 4) {
+              min = String.valueOf(ByteUtil.toLong(blockletMin, 0, blockletMin.length));
+            } else {
+              min = String.valueOf(ByteUtil.toInt(blockletMin, 0, blockletMin.length));
+            }
+            if (blockletMax.length > 4) {
+              max = String.valueOf(ByteUtil.toLong(blockletMax, 0, blockletMax.length));
+            } else {
+              max = String.valueOf(ByteUtil.toInt(blockletMax, 0, blockletMax.length));
+            }
+          }
         }
         printer.addRow(
             blocklet.getShardName(),
@@ -257,12 +344,14 @@ class DataSummary implements Command {
                 String.valueOf(blocklet.getColumnChunk().blockletDictionaryEntries),
                 Strings.formatSize(blocklet.getColumnChunk().blocketletDictionarySize),
                 Strings.formatSize(blocklet.getColumnChunk().avgPageLengthInBytes),
+                minPercent,
+                maxPercent,
                 min,
                 max}
         );
       }
     }
-    printer.printFormatted(out);
+    printer.collectFormattedData();
   }
 
   private void collectStats(String columnName) throws IOException, MemoryException {
@@ -275,25 +364,25 @@ class DataSummary implements Command {
     }
   }
 
-  private void printColumnChunkMeta(String columnName) throws IOException, MemoryException {
-    out.println();
+  private void collectColumnChunkMeta(String columnName) throws IOException, MemoryException {
     DataFile file = dataFiles.entrySet().iterator().next().getValue();
-    out.println("## Page Meta for column '" + columnName + "' in file " + file.getFilePath());
+    outPuts.add("");
+    outPuts.add("## Page Meta for column '" + columnName + "' in file " + file.getFilePath());
     collectStats(columnName);
     for (int i = 0; i < file.getAllBlocklets().size(); i++) {
       DataFile.Blocklet blocklet = file.getAllBlocklets().get(i);
       DataChunk3 dataChunk3 = blocklet.getColumnChunk().getDataChunk3();
       List<DataChunk2> dataChunk2List = dataChunk3.getData_chunk_list();
-      out.println(String.format("Blocklet %d:", i));
+      outPuts.add(String.format("Blocklet %d:", i));
 
       // There will be many pages, for debugging purpose,
       // just print 3 page for each blocklet is enough
       for (int j = 0; j < dataChunk2List.size() && j < 3; j++) {
-        out.println(String.format("Page %d (offset %d, length %d): %s",
+        outPuts.add(String.format("Page %d (offset %d, length %d): %s",
             j, dataChunk3.page_offset.get(j), dataChunk3.page_length.get(j),
             dataChunk2List.get(j).toString()));
       }
-      out.println("\n");
+      outPuts.add("");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/FileCollector.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/FileCollector.java b/tools/cli/src/main/java/org/apache/carbondata/tool/FileCollector.java
index 6e3297f..aa48b93 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/FileCollector.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/FileCollector.java
@@ -18,7 +18,6 @@
 package org.apache.carbondata.tool;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
@@ -43,16 +42,16 @@ class FileCollector {
   private long numPage;
   private long numRow;
   private long totalDataSize;
+  private List<String> outPuts;
 
   // file path mapping to file object
   private LinkedHashMap<String, DataFile> dataFiles = new LinkedHashMap<>();
   private CarbonFile tableStatusFile;
   private CarbonFile schemaFile;
 
-  private PrintStream out;
 
-  FileCollector(PrintStream out) {
-    this.out = out;
+  FileCollector(List<String> outPuts) {
+    this.outPuts = outPuts;
   }
 
   void collectFiles(String dataFolder) throws IOException {
@@ -73,8 +72,9 @@ class FileCollector {
       } else if (file.getName().startsWith(CarbonTablePath.SCHEMA_FILE)) {
         schemaFile = file;
       } else if (isStreamFile(file.getName())) {
-        out.println("WARN: input path contains streaming file, this tool does not support it yet, "
-            + "skipping it...");
+        outPuts.add(("WARN: input path contains streaming file, this tool does not support it yet, "
+            + "skipping it..."));
+
       }
     }
     unsortedFiles.sort((o1, o2) -> {
@@ -133,15 +133,16 @@ class FileCollector {
       System.out.println("no data file found");
       return;
     }
-    out.println("## Summary");
-    out.println(
-        String.format("total: %,d blocks, %,d shards, %,d blocklets, %,d pages, %,d rows, %s",
-            numBlock, numShard, numBlocklet, numPage, numRow, Strings.formatSize(totalDataSize)));
-    out.println(
-        String.format("avg: %s/block, %s/blocklet, %,d rows/block, %,d rows/blocklet",
-            Strings.formatSize((float) totalDataSize / numBlock),
-            Strings.formatSize((float) totalDataSize / numBlocklet),
-            numRow / numBlock,
-            numRow / numBlocklet));
+    outPuts.add("## Summary");
+    String format = String
+        .format("total: %,d blocks, %,d shards, %,d blocklets, %,d pages, %,d rows, %s", numBlock,
+            numShard, numBlocklet, numPage, numRow, Strings.formatSize(totalDataSize));
+    outPuts.add(format);
+
+    String format1 = String.format("avg: %s/block, %s/blocklet, %,d rows/block, %,d rows/blocklet",
+        Strings.formatSize((float) totalDataSize / numBlock),
+        Strings.formatSize((float) totalDataSize / numBlocklet), numRow / numBlock,
+        numRow / numBlocklet);
+    outPuts.add(format1);
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/ScanBenchmark.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/ScanBenchmark.java b/tools/cli/src/main/java/org/apache/carbondata/tool/ScanBenchmark.java
index 805d4e8..af5bdb3 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/ScanBenchmark.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/ScanBenchmark.java
@@ -18,7 +18,7 @@
 package org.apache.carbondata.tool;
 
 import java.io.IOException;
-import java.io.PrintStream;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -48,12 +48,12 @@ import org.apache.commons.cli.CommandLine;
 class ScanBenchmark implements Command {
 
   private String dataFolder;
-  private PrintStream out;
   private DataFile file;
+  private List<String> outPuts;
 
-  ScanBenchmark(String dataFolder, PrintStream out) {
+  ScanBenchmark(String dataFolder, List<String> outPuts) {
     this.dataFolder = dataFolder;
-    this.out = out;
+    this.outPuts = outPuts;
   }
 
   @Override
@@ -62,7 +62,7 @@ class ScanBenchmark implements Command {
       String filePath = line.getOptionValue("f");
       file = new DataFile(FileFactory.getCarbonFile(filePath));
     } else {
-      FileCollector collector = new FileCollector(out);
+      FileCollector collector = new FileCollector(outPuts);
       collector.collectFiles(dataFolder);
       if (collector.getNumDataFiles() == 0) {
         return;
@@ -71,7 +71,7 @@ class ScanBenchmark implements Command {
       file = dataFiles.entrySet().iterator().next().getValue();
     }
 
-    out.println("\n## Benchmark");
+    outPuts.add("\n## Benchmark");
     AtomicReference<FileHeader> fileHeaderRef = new AtomicReference<>();
     AtomicReference<FileFooter3> fileFoorterRef = new AtomicReference<>();
     AtomicReference<DataFileFooter> convertedFooterRef = new AtomicReference<>();
@@ -97,7 +97,7 @@ class ScanBenchmark implements Command {
 
     if (line.hasOption("c")) {
       String columnName = line.getOptionValue("c");
-      out.println("\nScan column '" + columnName + "'");
+      outPuts.add("\nScan column '" + columnName + "'");
 
       DataFileFooter footer = convertedFooterRef.get();
       AtomicReference<AbstractRawColumnChunk> columnChunk = new AtomicReference<>();
@@ -105,7 +105,7 @@ class ScanBenchmark implements Command {
       boolean dimension = file.getColumn(columnName).isDimensionColumn();
       for (int i = 0; i < footer.getBlockletList().size(); i++) {
         int blockletId = i;
-        out.println(String.format("Blocklet#%d: total size %s, %,d pages, %,d rows",
+        outPuts.add(String.format("Blocklet#%d: total size %s, %,d pages, %,d rows",
             blockletId,
             Strings.formatSize(file.getColumnDataSizeInBytes(blockletId, columnIndex)),
             footer.getBlockletList().get(blockletId).getNumberOfPages(),
@@ -139,7 +139,7 @@ class ScanBenchmark implements Command {
     start = System.nanoTime();
     op.run();
     end = System.nanoTime();
-    out.println(String.format("%s takes %,d us", opName, (end - start) / 1000));
+    outPuts.add(String.format("%s takes %,d us", opName, (end - start) / 1000));
   }
 
   private DataFileFooter readAndConvertFooter(DataFile file) throws IOException {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/ShardPrinter.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/ShardPrinter.java b/tools/cli/src/main/java/org/apache/carbondata/tool/ShardPrinter.java
index 05b6feb..20dece6 100644
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/ShardPrinter.java
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/ShardPrinter.java
@@ -17,33 +17,35 @@
 
 package org.apache.carbondata.tool;
 
-import java.io.PrintStream;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 class ShardPrinter {
-  private Map<String, TablePrinter> shardPrinter = new HashMap<>();
+  private Map<String, TableFormatter> shardPrinter = new HashMap<>();
   private String[] header;
+  private List<String> outPuts;
 
-  ShardPrinter(String[] header) {
+  ShardPrinter(String[] header, List<String> outPuts) {
     this.header = header;
+    this.outPuts = outPuts;
   }
 
   void addRow(String shardName, String[] row) {
-    TablePrinter printer = shardPrinter.get(shardName);
-    if (printer == null) {
-      printer = new TablePrinter(header);
-      shardPrinter.put(shardName, printer);
+    TableFormatter tableFormatter = shardPrinter.get(shardName);
+    if (tableFormatter == null) {
+      tableFormatter = new TableFormatter(header, outPuts);
+      shardPrinter.put(shardName, tableFormatter);
     }
-    printer.addRow(row);
+    tableFormatter.addRow(row);
   }
 
-  void printFormatted(PrintStream out) {
+  void collectFormattedData() {
     int shardId = 1;
-    for (Map.Entry<String, TablePrinter> entry : shardPrinter.entrySet()) {
-      out.println(String.format("Shard #%d (%s)", shardId++, entry.getKey()));
-      entry.getValue().printFormatted(out);
-      out.println();
+    for (Map.Entry<String, TableFormatter> entry : shardPrinter.entrySet()) {
+      outPuts.add(String.format("Shard #%d (%s)", shardId++, entry.getKey()));
+      entry.getValue().printFormatted();
+      outPuts.add("");
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/TableFormatter.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/TableFormatter.java b/tools/cli/src/main/java/org/apache/carbondata/tool/TableFormatter.java
new file mode 100644
index 0000000..2c57cd1
--- /dev/null
+++ b/tools/cli/src/main/java/org/apache/carbondata/tool/TableFormatter.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.tool;
+
+import java.util.LinkedList;
+import java.util.List;
+
+class TableFormatter {
+  private List<String[]> table = new LinkedList<>();
+  private List<String> outPuts;
+
+  /**
+   * create a new Table Printer
+   * @param header table header
+   */
+  TableFormatter(String[] header, List<String> outPuts) {
+    this.table.add(header);
+    this.outPuts = outPuts;
+  }
+
+  void addRow(String[] row) {
+    table.add(row);
+  }
+
+  void printFormatted() {
+    // calculate the max length of each output field in the table
+    int padding = 2;
+    int[] maxLength = new int[table.get(0).length];
+    for (int i = 0; i < table.get(0).length; i++) {
+      for (String[] row : table) {
+        maxLength[i] = Math.max(maxLength[i], row[i].length());
+      }
+    }
+
+    for (String[] row : table) {
+      StringBuilder outString = new StringBuilder();
+      for (int i = 0; i < row.length; i++) {
+        outString.append(row[i]);
+        for (int num = 0; num < maxLength[i] + padding - row[i].length(); num++) {
+          outString.append(" ");
+        }
+      }
+      outPuts.add(outString.toString());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/main/java/org/apache/carbondata/tool/TablePrinter.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/main/java/org/apache/carbondata/tool/TablePrinter.java b/tools/cli/src/main/java/org/apache/carbondata/tool/TablePrinter.java
deleted file mode 100644
index 2e02d2f..0000000
--- a/tools/cli/src/main/java/org/apache/carbondata/tool/TablePrinter.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.tool;
-
-import java.io.PrintStream;
-import java.util.LinkedList;
-import java.util.List;
-
-class TablePrinter {
-  private List<String[]> table = new LinkedList<>();
-
-  /**
-   * create a new Table Printer
-   * @param header table header
-   */
-  TablePrinter(String[] header) {
-    this.table.add(header);
-  }
-
-  void addRow(String[] row) {
-    table.add(row);
-  }
-
-  void printFormatted(PrintStream out) {
-    // calculate the max length of each output field in the table
-    int padding = 2;
-    int[] maxLength = new int[table.get(0).length];
-    for (int i = 0; i < table.get(0).length; i++) {
-      for (String[] row : table) {
-        maxLength[i] = Math.max(maxLength[i], row[i].length());
-      }
-    }
-
-    for (String[] row : table) {
-      for (int i = 0; i < row.length; i++) {
-        out.print(row[i]);
-        for (int num = 0; num < maxLength[i] + padding - row[i].length(); num++) {
-          out.print(" ");
-        }
-      }
-      out.println();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e0baa9b9/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
----------------------------------------------------------------------
diff --git a/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java b/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
index d53e3d9..002bc8d 100644
--- a/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
+++ b/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
@@ -102,7 +102,7 @@ public class CarbonCliTest {
             "## Table Properties\n"
           + "schema file not found"));
 
-    String[] args4 = {"-cmd", "summary", "-p", path, "-b"};
+    String[] args4 = {"-cmd", "summary", "-p", path, "-b", "7"};
     out = new ByteArrayOutputStream();
     stream = new PrintStream(out);
     CarbonCli.run(args4, stream);
@@ -125,14 +125,64 @@ public class CarbonCliTest {
     output = new String(out.toByteArray());
     Assert.assertTrue(
         output.contains(
-            "BLK  BLKLT  Meta Size  Data Size  LocalDict  DictEntries  DictSize  AvgPageSize  Min%    Max%    \n"
-          + "0    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      robot0  robot1  \n"
-          + "0    1      1.81KB     295.99KB   false      0            0.0B      11.77KB      robot1  robot3  \n"
-          + "1    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      robot3  robot4  \n"
-          + "1    1      1.81KB     295.99KB   false      0            0.0B      11.77KB      robot4  robot6  \n"
-          + "2    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      robot6  robot7  \n"
-          + "2    1      1.81KB     295.98KB   false      0            0.0B      11.77KB      robot8  robot9  \n"
-          + "2    2      519.0B     74.06KB    false      0            0.0B      10.51KB      robot9  robot9  "));
+            "BLK  BLKLT  Meta Size  Data Size  LocalDict  DictEntries  DictSize  AvgPageSize  Min%  Max%  Min     Max     \n"
+          + "0    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      NA    NA    robot0  robot1  \n"
+          + "0    1      1.81KB     295.99KB   false      0            0.0B      11.77KB      NA    NA    robot1  robot3  \n"
+          + "1    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      NA    NA    robot3  robot4  \n"
+          + "1    1      1.81KB     295.99KB   false      0            0.0B      11.77KB      NA    NA    robot4  robot6  \n"
+          + "2    0      1.81KB     295.98KB   false      0            0.0B      11.77KB      NA    NA    robot6  robot7  \n"
+          + "2    1      1.81KB     295.98KB   false      0            0.0B      11.77KB      NA    NA    robot8  robot9  \n"
+          + "2    2      519.0B     74.06KB    false      0            0.0B      10.51KB      NA    NA    robot9  robot9 "));
+  }
+
+  @Test
+  public void testSummaryOutputAll() {
+    String[] args = {"-cmd", "summary", "-p", path, "-a", "-c", "age"};
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream stream = new PrintStream(out);
+    CarbonCli.run(args, stream);
+    String output = new String(out.toByteArray());
+    Assert.assertTrue(
+        output.contains(
+            "Input Folder: ./CarbonCliTest\n"
+          + "## Summary\n"
+          + "total: 6 blocks, 2 shards, 14 blocklets, 314 pages, 10,000,000 rows, 32.27MB\n"
+          + "avg: 5.38MB/block, 2.30MB/blocklet, 1,666,666 rows/block, 714,285 rows/blocklet\n"));
+
+    Assert.assertTrue(
+        output.contains(
+            "Column Name  Data Type  Column Type  SortColumn  Encoding          Ordinal  Id  \n"
+          + "name         STRING     dimension    true        [INVERTED_INDEX]  0        NA  \n"
+          + "age          INT        measure      false       []                1        NA  \n"));
+
+    Assert.assertTrue(
+        output.contains(
+            "## Table Properties\n"
+          + "schema file not found"));
+
+    Assert.assertTrue(
+        output.contains(
+            "BLK  BLKLT  NumPages  NumRows  Size    \n"
+            + "0    0      25        800,000  2.58MB  \n"
+            + "0    1      25        800,000  2.58MB  \n"
+            + "1    0      25        800,000  2.58MB  \n"
+            + "1    1      25        800,000  2.58MB"));
+
+    Assert.assertTrue(
+        output.contains(
+          "BLK  BLKLT  Meta Size  Data Size  LocalDict  DictEntries  DictSize  AvgPageSize  Min%  Max%   Min  Max      \n"
+        + "0    0      3.00KB     4.87MB     false      0            0.0B      93.76KB      0.0   100.0  0    2999990  \n"
+        + "0    1      3.00KB     2.29MB     false      0            0.0B      93.76KB      0.0   100.0  1    2999992  \n"
+        + "1    0      3.00KB     4.87MB     false      0            0.0B      93.76KB      0.0   100.0  3    2999993  \n"
+        + "1    1      3.00KB     2.29MB     false      0            0.0B      93.76KB      0.0   100.0  4    2999995  \n"
+        + "2    0      3.00KB     5.52MB     false      0            0.0B      93.76KB      0.0   100.0  6    2999997  \n"
+        + "2    1      3.00KB     2.94MB     false      0            0.0B      93.76KB      0.0   100.0  8    2999998  \n"
+        + "2    2      858.0B     586.84KB   false      0            0.0B      83.71KB      0.0   100.0  9    2999999 "));
+
+    Assert.assertTrue(output.contains(
+        "## version Details\n"
+            + "written_by  Version         \n"
+            + "TestUtil    1.6.0-SNAPSHOT"));
   }
 
   @Test