Posted to commits@carbondata.apache.org by ra...@apache.org on 2017/06/15 11:50:06 UTC

[01/42] carbondata git commit: fixed multiple dictionary server issue

Repository: carbondata
Updated Branches:
  refs/heads/branch-1.1 19aafe24c -> c05523d0d


fixed multiple dictionary server issue


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f9fb2441
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f9fb2441
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f9fb2441

Branch: refs/heads/branch-1.1
Commit: f9fb244142220300b32b8d6c62da6d629cc8604d
Parents: 19aafe2
Author: kunal642 <ku...@knoldus.in>
Authored: Wed Mar 15 14:38:31 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:43:37 2017 +0530

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  10 ++
 .../dictionary/client/DictionaryClient.java     |   8 +-
 .../generator/ServerDictionaryGenerator.java    |   5 +
 .../generator/key/DictionaryMessageType.java    |   3 +-
 .../dictionary/server/DictionaryServer.java     | 113 +++++++++++++++----
 .../server/DictionaryServerHandler.java         |   3 +
 .../dictionary/client/DictionaryClientTest.java |   4 +-
 .../spark/rdd/CarbonDataRDDFactory.scala        |  64 ++++++-----
 .../execution/command/carbonTableSchema.scala   |  29 +----
 .../spark/rdd/CarbonDataRDDFactory.scala        |  41 +++----
 .../execution/command/carbonTableSchema.scala   |  29 +----
 11 files changed, 179 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index ee18321..269a75f 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -700,6 +700,16 @@ public final class CarbonCommonConstants {
   public static final String DICTIONARY_ONE_CHUNK_SIZE = "carbon.dictionary.chunk.size";
 
   /**
+   *  Dictionary Server Worker Threads
+   */
+  public static final String DICTIONARY_WORKER_THREADS = "dictionary.worker.threads";
+
+  /**
+   *  Dictionary Server Worker Threads default value
+   */
+  public static final String DICTIONARY_WORKER_THREADS_DEFAULT = "1";
+
+  /**
    * dictionary chunk default size
    */
   public static final String DICTIONARY_ONE_CHUNK_SIZE_DEFAULT = "10000";
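
For reference, a minimal sketch of how the new worker-thread setting is expected to be wired up. The getProperty call mirrors the DictionaryServer change further down in this commit; the addProperty call and the value "4" are illustrative assumptions, not part of the patch:

    // Hypothetical configuration: allow the dictionary server up to 4 Netty worker threads.
    CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.DICTIONARY_WORKER_THREADS, "4");

    // The server reads the value back, falling back to the default of 1 (see DictionaryServer below).
    int workerThreads = Integer.parseInt(CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.DICTIONARY_WORKER_THREADS,
            CarbonCommonConstants.DICTIONARY_WORKER_THREADS_DEFAULT));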

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClient.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClient.java b/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClient.java
index 39f747e..7910190 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClient.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClient.java
@@ -49,8 +49,10 @@ public class DictionaryClient {
    * @param port
    */
   public void startClient(String address, int port) {
+    LOGGER.audit("Starting client on " + address + " " + port);
     long start = System.currentTimeMillis();
-    workerGroup = new NioEventLoopGroup();
+    // Create an EventLoopGroup with a single worker thread.
+    workerGroup = new NioEventLoopGroup(1);
     Bootstrap clientBootstrap = new Bootstrap();
     clientBootstrap.group(workerGroup).channel(NioSocketChannel.class)
         .handler(new ChannelInitializer<SocketChannel>() {
@@ -58,7 +60,9 @@ public class DictionaryClient {
             ChannelPipeline pipeline = ch.pipeline();
             // Based on length provided at header, it collects all packets
             pipeline
-                .addLast("LengthDecoder", new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));
+                .addLast("LengthDecoder",
+                    new LengthFieldBasedFrameDecoder(1048576, 0,
+                        2, 0, 2));
             pipeline.addLast("DictionaryClientHandler", dictionaryClientHandler);
           }
         });

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
index b2b9863..cd168b8 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
@@ -71,4 +71,9 @@ public class ServerDictionaryGenerator implements DictionaryGenerator<Integer, D
     }
   }
 
+  public void writeTableDictionaryData(String tableUniqueName) throws Exception {
+    TableDictionaryGenerator generator = tableMap.get(tableUniqueName);
+    generator.writeDictionaryData(tableUniqueName);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/dictionary/generator/key/DictionaryMessageType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/key/DictionaryMessageType.java b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/key/DictionaryMessageType.java
index 608b602..b3d1f9a 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/key/DictionaryMessageType.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/key/DictionaryMessageType.java
@@ -24,7 +24,8 @@ public enum DictionaryMessageType {
   DICT_GENERATION((byte) 1),
   TABLE_INTIALIZATION((byte) 2),
   SIZE((byte) 3),
-  WRITE_DICTIONARY((byte) 4);
+  WRITE_DICTIONARY((byte) 4),
+  WRITE_TABLE_DICTIONARY((byte) 5);
 
   final byte type;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
index e2eaaa3..f86cd6b 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
@@ -18,8 +18,10 @@ package org.apache.carbondata.core.dictionary.server;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.dictionary.generator.key.DictionaryMessage;
 import org.apache.carbondata.core.dictionary.generator.key.DictionaryMessageType;
+import org.apache.carbondata.core.util.CarbonProperties;
 
 import io.netty.bootstrap.ServerBootstrap;
 import io.netty.channel.ChannelInitializer;
@@ -43,44 +45,91 @@ public class DictionaryServer {
 
   private EventLoopGroup boss;
   private EventLoopGroup worker;
+  private int port;
+  private static Object lock = new Object();
+  private static DictionaryServer INSTANCE = null;
+
+  private DictionaryServer(int port) {
+    startServer(port);
+  }
+
+  public static DictionaryServer getInstance(int port) {
+    if (INSTANCE == null) {
+      synchronized (lock) {
+        if (INSTANCE == null) {
+          INSTANCE = new DictionaryServer(port);
+        }
+      }
+    }
+    return INSTANCE;
+  }
 
   /**
    * start dictionary server
    *
    * @param port
-   * @throws Exception
    */
-  public void startServer(int port) {
-    long start = System.currentTimeMillis();
+  private void startServer(int port) {
     dictionaryServerHandler = new DictionaryServerHandler();
-    boss = new NioEventLoopGroup();
-    worker = new NioEventLoopGroup();
+    String workerThreads = CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.DICTIONARY_WORKER_THREADS,
+            CarbonCommonConstants.DICTIONARY_WORKER_THREADS_DEFAULT);
+    boss = new NioEventLoopGroup(1);
+    worker = new NioEventLoopGroup(Integer.parseInt(workerThreads));
+    // Configure the server.
+    bindToPort(port);
+  }
+
+  /**
+   * Binds dictionary server to an available port.
+   *
+   * @param port
+   */
+  private void bindToPort(int port) {
+    long start = System.currentTimeMillis();
     // Configure the server.
-    try {
-      ServerBootstrap bootstrap = new ServerBootstrap();
-      bootstrap.group(boss, worker);
-      bootstrap.channel(NioServerSocketChannel.class);
-
-      bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
-        @Override public void initChannel(SocketChannel ch) throws Exception {
-          ChannelPipeline pipeline = ch.pipeline();
-          // Based on length provided at header, it collects all packets
-          pipeline.addLast("LengthDecoder", new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));
-          pipeline.addLast("DictionaryServerHandler", dictionaryServerHandler);
+    int i = 0;
+    while (i < 10) {
+      int newPort = port + i;
+      try {
+        ServerBootstrap bootstrap = new ServerBootstrap();
+        bootstrap.group(boss, worker);
+        bootstrap.channel(NioServerSocketChannel.class);
+        bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
+          @Override public void initChannel(SocketChannel ch) throws Exception {
+            ChannelPipeline pipeline = ch.pipeline();
+            pipeline
+                .addLast("LengthDecoder",
+                    new LengthFieldBasedFrameDecoder(1048576, 0,
+                        2, 0, 2));
+            pipeline.addLast("DictionaryServerHandler", dictionaryServerHandler);
+          }
+        });
+        bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
+        bootstrap.bind(newPort).sync();
+        LOGGER.audit("Dictionary Server started, Time spent " + (System.currentTimeMillis() - start)
+            + " Listening on port " + newPort);
+        this.port = newPort;
+        break;
+      } catch (Exception e) {
+        LOGGER.error(e, "Dictionary Server failed to bind to port " + newPort);
+        if (i == 9) {
+          throw new RuntimeException("Dictionary Server Could not bind to any port");
         }
-      });
-      bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
-      bootstrap.bind(port).sync();
-
-      LOGGER.info("Dictionary Server started, Time spent " + (System.currentTimeMillis() - start)
-          + " Listening on port " + port);
-    } catch (Exception e) {
-      LOGGER.error(e, "Dictionary Server Start Failed");
-      throw new RuntimeException(e);
+      }
+      i++;
     }
   }
 
   /**
+   *
+   * @return Port on which the DictionaryServer has started.
+   */
+  public int getPort() {
+    return port;
+  }
+
+  /**
    * shutdown dictionary server
    *
    * @throws Exception
@@ -93,6 +142,8 @@ public class DictionaryServer {
     worker.terminationFuture().sync();
   }
 
+
+
   /**
    * Write dictionary to the store.
    * @throws Exception
@@ -102,4 +153,16 @@ public class DictionaryServer {
     key.setType(DictionaryMessageType.WRITE_DICTIONARY);
     dictionaryServerHandler.processMessage(key);
   }
+
+  /**
+   *  Write Dictionary for one table.
+   * @throws Exception
+   */
+
+  public void writeTableDictionary(String uniqueTableName) throws Exception {
+    DictionaryMessage key = new DictionaryMessage();
+    key.setTableUniqueName(uniqueTableName);
+    key.setType(DictionaryMessageType.WRITE_TABLE_DICTIONARY);
+    dictionaryServerHandler.processMessage(key);
+  }
 }
\ No newline at end of file
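
The change above turns DictionaryServer into a lazily created singleton that, instead of failing on a busy port, retries binding on up to ten consecutive ports and exposes the port it finally bound to. A minimal, self-contained sketch of that pattern, with illustrative names and a plain ServerSocket standing in for the Netty ServerBootstrap used by the real class:

    // Double-checked-locking singleton that binds to the first free port in [basePort, basePort + 9].
    import java.io.IOException;
    import java.net.ServerSocket;

    public final class SingletonPortRetryServer {
      private static final Object LOCK = new Object();
      private static volatile SingletonPortRetryServer instance;
      private final int port;

      private SingletonPortRetryServer(int basePort) {
        this.port = bindToPort(basePort);
      }

      public static SingletonPortRetryServer getInstance(int basePort) {
        if (instance == null) {
          synchronized (LOCK) {
            if (instance == null) {
              instance = new SingletonPortRetryServer(basePort);
            }
          }
        }
        return instance;
      }

      private static int bindToPort(int basePort) {
        for (int i = 0; i < 10; i++) {
          int candidate = basePort + i;
          try (ServerSocket socket = new ServerSocket(candidate)) {
            return candidate;      // bound successfully; the real server keeps its channel open
          } catch (IOException e) {
            // port already in use, try the next one
          }
        }
        throw new RuntimeException("Could not bind to any port");
      }

      public int getPort() {
        return port;
      }
    }

One design note: the sketch marks the instance field volatile, which is what makes double-checked locking safe under the Java memory model; the committed code relies on the synchronized block alone.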

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServerHandler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServerHandler.java b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServerHandler.java
index a14b675..946a43d 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServerHandler.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServerHandler.java
@@ -101,6 +101,9 @@ public class DictionaryServerHandler extends ChannelInboundHandlerAdapter {
       case WRITE_DICTIONARY :
         generatorForServer.writeDictionaryData();
         return 0;
+      case WRITE_TABLE_DICTIONARY:
+        generatorForServer.writeTableDictionaryData(key.getTableUniqueName());
+        return 0;
       default:
         return -1;
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java b/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
index c7989e4..a96e364 100644
--- a/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
@@ -91,8 +91,7 @@ public class DictionaryClientTest {
     metadata.addCarbonTable(carbonTable);
 
     // Start the server for testing the client
-    server = new DictionaryServer();
-    server.startServer(5678);
+    server = DictionaryServer.getInstance(5678);
   }
 
   @Test public void testClient() throws Exception {
@@ -159,7 +158,6 @@ public class DictionaryClientTest {
     client.shutDown();
 
     // Shutdown the server
-    server.shutdown();
   }
 
   @After public void tearDown() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 51a56e9..8f4727a 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -347,7 +347,7 @@ object CarbonDataRDDFactory {
       storePath: String,
       columnar: Boolean,
       partitionStatus: String = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS,
-      result: Future[DictionaryServer],
+      result: Option[DictionaryServer],
       dataFrame: Option[DataFrame] = None,
       updateModel: Option[UpdateTableModel] = None): Unit = {
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
@@ -890,31 +890,28 @@ object CarbonDataRDDFactory {
         LOGGER.audit(s"Data load is failed for " +
             s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
         LOGGER.warn("Cannot write load metadata file as data load failed")
-        shutDownDictionaryServer(carbonLoadModel, result, false)
         throw new Exception(errorMessage)
       } else {
         val metadataDetails = status(0)._2
         if (!isAgg) {
-          val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,
-            carbonLoadModel, loadStatus, loadStartTime)
-          if (!status) {
-            val errorMessage = "Dataload failed due to failure in table status updation."
-            LOGGER.audit("Data load is failed for " +
-                s"${ carbonLoadModel.getDatabaseName }.${
-                  carbonLoadModel
-                      .getTableName
-                }")
-            LOGGER.error("Dataload failed due to failure in table status updation.")
-            shutDownDictionaryServer(carbonLoadModel, result, false)
-            throw new Exception(errorMessage)
-          }
+            writeDictionary(carbonLoadModel, result, false)
+            val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,
+              carbonLoadModel, loadStatus, loadStartTime)
+            if (!status) {
+              val errorMessage = "Dataload failed due to failure in table status updation."
+              LOGGER.audit("Data load is failed for " +
+                           s"${ carbonLoadModel.getDatabaseName }.${
+                             carbonLoadModel
+                               .getTableName
+                           }")
+              LOGGER.error("Dataload failed due to failure in table status updation.")
+              throw new Exception(errorMessage)
+            }
         } else if (!carbonLoadModel.isRetentionRequest) {
           // TODO : Handle it
           LOGGER.info("********Database updated**********")
         }
 
-        shutDownDictionaryServer(carbonLoadModel, result)
-
         if (CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS.equals(loadStatus)) {
           LOGGER.audit("Data load is partially successful for " +
                        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
@@ -935,22 +932,27 @@ object CarbonDataRDDFactory {
 
   }
 
-  private def shutDownDictionaryServer(carbonLoadModel: CarbonLoadModel,
-      result: Future[DictionaryServer], writeDictionary: Boolean = true): Unit = {
+  private def writeDictionary(carbonLoadModel: CarbonLoadModel,
+      result: Option[DictionaryServer], writeAll: Boolean) = {
     // write dictionary file and shutdown dictionary server
-    if (carbonLoadModel.getUseOnePass) {
-      try {
-        val server = result.get()
-        if (writeDictionary) {
-          server.writeDictionary()
+    val uniqueTableName: String = s"${ carbonLoadModel.getDatabaseName }_${
+      carbonLoadModel.getTableName }"
+    result match {
+      case Some(server) =>
+        try {
+          if (writeAll) {
+            server.writeDictionary()
+          }
+          else {
+            server.writeTableDictionary(uniqueTableName)
+          }
+        } catch {
+          case ex: Exception =>
+            LOGGER.error(s"Error while writing dictionary file for $uniqueTableName")
+            throw new Exception("Dataload failed due to error while writing dictionary file!")
         }
-        server.shutdown()
-      } catch {
-        case ex: Exception =>
-          LOGGER.error("Error while close dictionary server and write dictionary file for " +
-                       s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
-          throw new Exception("Dataload failed due to error while write dictionary file!")
-      }
+      case _ =>
     }
   }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index c030321..c770e1b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -18,10 +18,6 @@
 package org.apache.spark.sql.execution.command
 
 import java.io.File
-import java.util.concurrent.Callable
-import java.util.concurrent.Executors
-import java.util.concurrent.ExecutorService
-import java.util.concurrent.Future
 
 import scala.collection.JavaConverters._
 import scala.language.implicitConversions
@@ -461,9 +457,6 @@ case class LoadTable(
 
       val partitionStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
 
-      var result: Future[DictionaryServer] = null
-      var executorService: ExecutorService = null
-
       try {
         // First system has to partition the data first and then call the load data
         LOGGER.info(s"Initiating Direct Load for the Table : ($dbName.$tableName)")
@@ -500,26 +493,18 @@ case class LoadTable(
           val dictionaryServerPort = CarbonProperties.getInstance()
             .getProperty(CarbonCommonConstants.DICTIONARY_SERVER_PORT,
               CarbonCommonConstants.DICTIONARY_SERVER_PORT_DEFAULT)
-          carbonLoadModel.setDictionaryServerPort(Integer.parseInt(dictionaryServerPort))
           val sparkDriverHost = sqlContext.sparkContext.getConf.get("spark.driver.host")
           carbonLoadModel.setDictionaryServerHost(sparkDriverHost)
           // start dictionary server when use one pass load.
-          executorService = Executors.newFixedThreadPool(1)
-          result = executorService.submit(new Callable[DictionaryServer]() {
-            @throws[Exception]
-            def call: DictionaryServer = {
-              Thread.currentThread().setName("Dictionary server")
-              val server: DictionaryServer = new DictionaryServer
-              server.startServer(dictionaryServerPort.toInt)
-              server
-            }
-          })
+          val server: DictionaryServer = DictionaryServer
+            .getInstance(dictionaryServerPort.toInt)
+          carbonLoadModel.setDictionaryServerPort(server.getPort)
           CarbonDataRDDFactory.loadCarbonData(sqlContext,
             carbonLoadModel,
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            result,
+            Some(server),
             dataFrame,
             updateModel)
         } else {
@@ -564,7 +549,7 @@ case class LoadTable(
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            result,
+            None,
             loadDataFrame,
             updateModel)
         }
@@ -576,10 +561,6 @@ case class LoadTable(
       } finally {
         // Once the data load is successful delete the unwanted partition files
         try {
-          // shutdown dictionary server thread
-          if (carbonLoadModel.getUseOnePass) {
-            executorService.shutdownNow()
-          }
           val fileType = FileFactory.getFileType(partitionLocation)
           if (FileFactory.isFileExist(partitionLocation, fileType)) {
             val file = FileFactory

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 4656c2e..835af35 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -26,7 +26,7 @@ import scala.collection.mutable.ListBuffer
 import scala.util.Random
 import scala.util.control.Breaks._
 
-import org.apache.hadoop.conf.{Configurable, Configuration}
+import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.mapreduce.Job
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
@@ -360,7 +360,7 @@ object CarbonDataRDDFactory {
       storePath: String,
       columnar: Boolean,
       partitionStatus: String = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS,
-      result: Future[DictionaryServer],
+      result: Option[DictionaryServer],
       dataFrame: Option[DataFrame] = None,
       updateModel: Option[UpdateTableModel] = None): Unit = {
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
@@ -807,7 +807,6 @@ object CarbonDataRDDFactory {
         LOGGER.audit(s"Data load is failed for " +
             s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
         LOGGER.warn("Cannot write load metadata file as data load failed")
-        shutdownDictionaryServer(carbonLoadModel, result, false)
         throw new Exception(errorMessage)
       } else {
         // if segment is empty then fail the data load
@@ -818,11 +817,11 @@ object CarbonDataRDDFactory {
                        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
                        " as there is no data to load")
           LOGGER.warn("Cannot write load metadata file as data load failed")
-          shutdownDictionaryServer(carbonLoadModel, result, false)
           throw new Exception("No Data to load")
         }
         val metadataDetails = status(0)._2
         if (!isAgg) {
+          writeDictionary(carbonLoadModel, result, false)
           val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,
             carbonLoadModel, loadStatus, loadStartTime)
           if (!status) {
@@ -833,7 +832,6 @@ object CarbonDataRDDFactory {
                       .getTableName
                 }")
             LOGGER.error("Dataload failed due to failure in table status updation.")
-            shutdownDictionaryServer(carbonLoadModel, result, false)
             throw new Exception(errorMessage)
           }
         } else if (!carbonLoadModel.isRetentionRequest) {
@@ -841,7 +839,6 @@ object CarbonDataRDDFactory {
           LOGGER.info("********Database updated**********")
         }
 
-        shutdownDictionaryServer(carbonLoadModel, result)
         if (CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS.equals(loadStatus)) {
           LOGGER.audit("Data load is partially successful for " +
                        s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
@@ -862,22 +859,26 @@ object CarbonDataRDDFactory {
 
   }
 
-  private def shutdownDictionaryServer(carbonLoadModel: CarbonLoadModel,
-      result: Future[DictionaryServer], writeDictionary: Boolean = true) = {
+  private def writeDictionary(carbonLoadModel: CarbonLoadModel,
+      result: Option[DictionaryServer], writeAll: Boolean) = {
     // write dictionary file and shutdown dictionary server
-    if (carbonLoadModel.getUseOnePass) {
-      try {
-        val server = result.get()
-        if (writeDictionary) {
-          server.writeDictionary()
+    val uniqueTableName: String = s"${ carbonLoadModel.getDatabaseName }_${
+      carbonLoadModel.getTableName
+    }"
+    result match {
+      case Some(server) =>
+        try {
+          if (writeAll) {
+            server.writeDictionary()
+          } else {
+            server.writeTableDictionary(uniqueTableName)
+          }
+        } catch {
+          case ex: Exception =>
+            LOGGER.error(s"Error while writing dictionary file for $uniqueTableName")
+            throw new Exception("Dataload failed due to error while writing dictionary file!")
         }
-        server.shutdown()
-      } catch {
-        case ex: Exception =>
-          LOGGER.error("Error while close dictionary server and write dictionary file for " +
-                       s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
-          throw new Exception("Dataload failed due to error while write dictionary file!")
-      }
+      case _ =>
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9fb2441/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 593de39..11b3115 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -18,10 +18,8 @@
 package org.apache.spark.sql.execution.command
 
 import java.io.File
-import java.util.concurrent.{Callable, Executors, ExecutorService, Future}
 
 import scala.collection.JavaConverters._
-import scala.collection.mutable.ListBuffer
 import scala.language.implicitConversions
 
 import org.apache.commons.lang3.StringUtils
@@ -468,9 +466,6 @@ case class LoadTable(
       carbonLoadModel.setAllDictPath(allDictionaryPath)
 
       val partitionStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
-      var result: Future[DictionaryServer] = null
-      var executorService: ExecutorService = null
-
       try {
         // First system has to partition the data first and then call the load data
         LOGGER.info(s"Initiating Direct Load for the Table : ($dbName.$tableName)")
@@ -506,27 +501,19 @@ case class LoadTable(
           val dictionaryServerPort = CarbonProperties.getInstance()
             .getProperty(CarbonCommonConstants.DICTIONARY_SERVER_PORT,
               CarbonCommonConstants.DICTIONARY_SERVER_PORT_DEFAULT)
-          carbonLoadModel.setDictionaryServerPort(Integer.parseInt(dictionaryServerPort))
           val sparkDriverHost = sparkSession.sqlContext.sparkContext.
             getConf.get("spark.driver.host")
           carbonLoadModel.setDictionaryServerHost(sparkDriverHost)
           // start dictionary server when use one pass load.
-          executorService = Executors.newFixedThreadPool(1)
-          result = executorService.submit(new Callable[DictionaryServer]() {
-            @throws[Exception]
-            def call: DictionaryServer = {
-              Thread.currentThread().setName("Dictionary server")
-              val server: DictionaryServer = new DictionaryServer
-              server.startServer(dictionaryServerPort.toInt)
-              server
-            }
-          })
+          val server: DictionaryServer = DictionaryServer
+            .getInstance(dictionaryServerPort.toInt)
+          carbonLoadModel.setDictionaryServerPort(server.getPort)
           CarbonDataRDDFactory.loadCarbonData(sparkSession.sqlContext,
             carbonLoadModel,
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            result,
+            Some(server),
             dataFrame,
             updateModel)
         }
@@ -575,7 +562,7 @@ case class LoadTable(
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            result,
+            None,
             loadDataFrame,
             updateModel)
         }
@@ -587,12 +574,6 @@ case class LoadTable(
       } finally {
         // Once the data load is successful delete the unwanted partition files
         try {
-
-          // shutdown dictionary server thread
-          if (carbonLoadModel.getUseOnePass) {
-            executorService.shutdownNow()
-          }
-
           val fileType = FileFactory.getFileType(partitionLocation)
           if (FileFactory.isFileExist(partitionLocation, fileType)) {
             val file = FileFactory


[30/42] carbondata git commit: Improve No dictionary column Include And Exclude filter Fixed Data mismatch issue Fixed Alter with Caps Decimal issue

Posted by ra...@apache.org.
Improve No dictionary column Include And Exclude filter
Fixed Data mismatch issue
Fixed Alter with Caps Decimal issue


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/357ab636
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/357ab636
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/357ab636

Branch: refs/heads/branch-1.1
Commit: 357ab636f7596d7e26bfd92657d708a659b9e718
Parents: bbcc487
Author: kumarvishal <ku...@gmail.com>
Authored: Wed May 31 15:49:54 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:21:02 2017 +0530

----------------------------------------------------------------------
 .../schema/table/column/CarbonDimension.java    |   7 +
 .../carbondata/core/scan/filter/FilterUtil.java |  18 ++
 .../executer/ExcludeFilterExecuterImpl.java     | 120 +++++------
 .../executer/IncludeFilterExecuterImpl.java     |  89 ++++----
 .../executer/RangeValueFilterExecuterImpl.java  | 214 +++++++++++++------
 .../executer/RowLevelFilterExecuterImpl.java    |   5 +
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  82 ++++---
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  75 ++++---
 ...velRangeLessThanEqualFilterExecuterImpl.java | 106 +++++----
 .../RowLevelRangeLessThanFiterExecuterImpl.java | 113 ++++++----
 10 files changed, 503 insertions(+), 326 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
index bdc7a4c..8d02512 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
@@ -122,6 +122,13 @@ public class CarbonDimension extends CarbonColumn {
   }
 
   /**
+   * @return whether this column participates in sorting
+   */
+  public boolean isSortColumn() {
+    return this.columnSchema.isSortColumn();
+  }
+
+  /**
    * to generate the hash code for this class
    */
   @Override public int hashCode() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index 7799b6a..73387db 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -49,6 +49,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
 import org.apache.carbondata.core.datastore.IndexKey;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -1456,4 +1457,21 @@ public final class FilterUtil {
     return bitSetGroup;
   }
 
+  /**
+   * This method will compare the selected data against null values and
+   * flip the bitSet if any null value is found
+   *
+   * @param dimensionColumnDataChunk
+   * @param bitSet
+   */
+  public static void removeNullValues(DimensionColumnDataChunk dimensionColumnDataChunk,
+      BitSet bitSet, byte[] defaultValue) {
+    if (!bitSet.isEmpty()) {
+      for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
+        if (dimensionColumnDataChunk.compareTo(i, defaultValue) == 0) {
+          bitSet.flip(i);
+        }
+      }
+    }
+  }
 }
\ No newline at end of file
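
The new removeNullValues helper clears rows whose bytes equal the reserved null member key ("@NU#LL$!"), because a range filter located by binary search would otherwise count that marker as a real match. A standalone illustration of the same logic, with a plain byte[][] standing in for DimensionColumnDataChunk and the marker inlined as an assumption:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import java.util.BitSet;

    public final class NullValueRemovalSketch {
      // Stand-in for CarbonCommonConstants.MEMBER_DEFAULT_VAL, the reserved null marker.
      private static final byte[] NULL_MEMBER = "@NU#LL$!".getBytes(StandardCharsets.UTF_8);

      // Same shape as FilterUtil.removeNullValues: walk only the set bits and
      // flip off any row that holds the null marker.
      static void removeNullValues(byte[][] columnData, BitSet bitSet, byte[] defaultValue) {
        if (!bitSet.isEmpty()) {
          for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
            if (Arrays.equals(columnData[i], defaultValue)) {
              bitSet.flip(i);
            }
          }
        }
      }

      public static void main(String[] args) {
        byte[][] column = {
            "a".getBytes(StandardCharsets.UTF_8), NULL_MEMBER, "c".getBytes(StandardCharsets.UTF_8)};
        BitSet matched = new BitSet(3);
        matched.set(0, 3);                          // pretend the range filter matched every row
        removeNullValues(column, matched, NULL_MEMBER);
        System.out.println(matched);                // prints {0, 2} -- the null row is dropped
      }
    }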

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
index 7449781..23209ed 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
@@ -22,8 +22,6 @@ import java.util.BitSet;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
-import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionDataChunk;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
@@ -35,7 +33,10 @@ public class ExcludeFilterExecuterImpl implements FilterExecuter {
   protected DimColumnResolvedFilterInfo dimColEvaluatorInfo;
   protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
   protected SegmentProperties segmentProperties;
-
+  /**
+   * whether the dimension column data is naturally sorted
+   */
+  private boolean isNaturalSorted;
   public ExcludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
       SegmentProperties segmentProperties) {
     this.dimColEvaluatorInfo = dimColEvaluatorInfo;
@@ -43,6 +44,8 @@ public class ExcludeFilterExecuterImpl implements FilterExecuter {
     this.segmentProperties = segmentProperties;
     FilterUtil.prepareKeysFromSurrogates(dimColEvaluatorInfo.getFilterValues(), segmentProperties,
         dimColEvaluatorInfo.getDimension(), dimColumnExecuterInfo);
+    isNaturalSorted = dimColEvaluatorInfo.getDimension().isUseInvertedIndex() && dimColEvaluatorInfo
+        .getDimension().isSortColumn();
   }
 
   @Override public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder) throws IOException {
@@ -69,96 +72,71 @@ public class ExcludeFilterExecuterImpl implements FilterExecuter {
 
   protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimColumnDataChunk,
       int numerOfRows) {
-    // For high cardinality dimensions.
-    if (dimColumnDataChunk.isNoDicitionaryColumn()
-        && dimColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
-      return setDirectKeyFilterIndexToBitSet((VariableLengthDimensionDataChunk) dimColumnDataChunk,
-          numerOfRows);
-    }
-    if (dimColumnDataChunk.isExplicitSorted()
-        && dimColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
+    if (dimColumnDataChunk.isExplicitSorted()) {
+      return setFilterdIndexToBitSetWithColumnIndex(dimColumnDataChunk, numerOfRows);
     }
-    return setFilterdIndexToBitSet((FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
-  }
-
-  private BitSet setDirectKeyFilterIndexToBitSet(
-      VariableLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int i = 0; i < filterValues.length; i++) {
-      byte[] filterVal = filterValues[i];
-      if (dimColumnDataChunk.isExplicitSorted()) {
-        for (int index = 0; index < numerOfRows; index++) {
-          if (dimColumnDataChunk.compareTo(index, filterVal) == 0) {
-            bitSet.flip(dimColumnDataChunk.getInvertedIndex(index));
-          }
-        }
-      } else {
-        for (int index = 0; index < numerOfRows; index++) {
-          if (dimColumnDataChunk.compareTo(index, filterVal) == 0) {
-            bitSet.flip(index);
-          }
-        }
-      }
-    }
-    return bitSet;
-
+    return setFilterdIndexToBitSet(dimColumnDataChunk, numerOfRows);
   }
 
   private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
-    int startKey = 0;
-    int last = 0;
-    int startIndex = 0;
+      DimensionColumnDataChunk dimensionColumnDataChunk, int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     bitSet.flip(0, numerOfRows);
+    int startIndex = 0;
     byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
     for (int i = 0; i < filterValues.length; i++) {
-      startKey = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], false);
-      if (startKey < 0) {
-        continue;
-      }
-      bitSet.flip(dimColumnDataChunk.getInvertedIndex(startKey));
-      last = startKey;
-      for (int j = startKey + 1; j < numerOfRows; j++) {
-        if (dimColumnDataChunk.compareTo(j, filterValues[i]) == 0) {
-          bitSet.flip(dimColumnDataChunk.getInvertedIndex(j));
-          last++;
-        } else {
-          break;
-        }
-      }
-      startIndex = last;
       if (startIndex >= numerOfRows) {
         break;
       }
+      int[] rangeIndex = CarbonUtil
+          .getRangeIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i]);
+      for (int j = rangeIndex[0]; j <= rangeIndex[1]; j++) {
+        bitSet.flip(dimensionColumnDataChunk.getInvertedIndex(j));
+      }
+      if (rangeIndex[1] >= 0) {
+        startIndex = rangeIndex[1] + 1;
+      }
     }
     return bitSet;
   }
 
-  // use binary search to replace for clause
-  private BitSet setFilterdIndexToBitSet(FixedLengthDimensionDataChunk dimColumnDataChunk,
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     bitSet.flip(0, numerOfRows);
     byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    if (filterValues.length > 1) {
-      for (int j = 0; j < numerOfRows; j++) {
-        int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
-            dimColumnDataChunk.getChunkData(j));
-        if (index >= 0) {
+    // binary search can only be applied if column is sorted
+    if (isNaturalSorted) {
+      int startIndex = 0;
+      for (int i = 0; i < filterValues.length; i++) {
+        if (startIndex >= numerOfRows) {
+          break;
+        }
+        int[] rangeIndex = CarbonUtil
+            .getRangeIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+                filterValues[i]);
+        for (int j = rangeIndex[0]; j <= rangeIndex[1]; j++) {
           bitSet.flip(j);
         }
+        if (rangeIndex[1] >= 0) {
+          startIndex = rangeIndex[1] + 1;
+        }
       }
-    } else if (filterValues.length == 1) {
-      for (int j = 0; j < numerOfRows; j++) {
-        if (dimColumnDataChunk.compareTo(j, filterValues[0]) == 0) {
-          bitSet.flip(j);
+    } else {
+      if (filterValues.length > 1) {
+        for (int i = 0; i < numerOfRows; i++) {
+          int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
+              dimensionColumnDataChunk.getChunkData(i));
+          if (index >= 0) {
+            bitSet.flip(i);
+          }
+        }
+      } else {
+        for (int j = 0; j < numerOfRows; j++) {
+          if (dimensionColumnDataChunk.compareTo(j, filterValues[0]) == 0) {
+            bitSet.flip(j);
+          }
         }
       }
     }
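
The rewritten exclude path above comes down to one technique: for a naturally sorted column, each (sorted) filter value is located with a single range binary search, and the scan for the next value resumes just past the previous match (startIndex = rangeIndex[1] + 1). A self-contained sketch of that idea, with ints standing in for the byte[] dictionary keys of the real DimensionColumnDataChunk:

    import java.util.BitSet;

    public final class SortedColumnExcludeFilterSketch {

      // Returns {firstIndex, lastIndex} of value within column[startIndex..endIndex],
      // or {-1, -2} when the value is absent (so the caller's range loop does nothing).
      static int[] rangeBinarySearch(int[] column, int startIndex, int endIndex, int value) {
        int low = startIndex, high = endIndex, found = -1;
        while (low <= high) {
          int mid = (low + high) >>> 1;
          if (column[mid] == value) { found = mid; break; }
          if (column[mid] < value) low = mid + 1; else high = mid - 1;
        }
        if (found < 0) return new int[] {-1, -2};
        int first = found, last = found;
        while (first > startIndex && column[first - 1] == value) first--;
        while (last < endIndex && column[last + 1] == value) last++;
        return new int[] {first, last};
      }

      // Exclude semantics: start with every row set, then clear each row matching a filter value.
      static BitSet excludeFilter(int[] sortedColumn, int[] sortedFilterValues) {
        int numberOfRows = sortedColumn.length;
        BitSet bitSet = new BitSet(numberOfRows);
        bitSet.flip(0, numberOfRows);
        int startIndex = 0;
        for (int value : sortedFilterValues) {
          if (startIndex >= numberOfRows) {
            break;
          }
          int[] range = rangeBinarySearch(sortedColumn, startIndex, numberOfRows - 1, value);
          for (int j = range[0]; j <= range[1]; j++) {
            bitSet.clear(j);
          }
          if (range[1] >= 0) {
            startIndex = range[1] + 1;   // resume after the last occurrence of this value
          }
        }
        return bitSet;
      }
    }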

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
index 7b8f084..8704496 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
@@ -22,8 +22,6 @@ import java.util.BitSet;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
-import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionDataChunk;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
@@ -36,6 +34,10 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
   protected DimColumnResolvedFilterInfo dimColumnEvaluatorInfo;
   protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
   protected SegmentProperties segmentProperties;
+  /**
+   * whether the dimension column data is naturally sorted
+   */
+  private boolean isNaturalSorted;
 
   public IncludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
       SegmentProperties segmentProperties) {
@@ -44,7 +46,9 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
     dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
     FilterUtil.prepareKeysFromSurrogates(dimColumnEvaluatorInfo.getFilterValues(),
         segmentProperties, dimColumnEvaluatorInfo.getDimension(), dimColumnExecuterInfo);
-
+    isNaturalSorted =
+        dimColumnEvaluatorInfo.getDimension().isUseInvertedIndex() && dimColumnEvaluatorInfo
+            .getDimension().isSortColumn();
   }
 
   @Override public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder) throws IOException {
@@ -76,58 +80,29 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
 
   protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
-    if (dimensionColumnDataChunk.isNoDicitionaryColumn()
-        && dimensionColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
-      return setDirectKeyFilterIndexToBitSet(
-          (VariableLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
-    } else if (dimensionColumnDataChunk.isExplicitSorted()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
+    if (dimensionColumnDataChunk.isExplicitSorted()) {
+      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows);
     }
-
     return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
   }
 
-  private BitSet setDirectKeyFilterIndexToBitSet(
-      VariableLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int i = 0; i < filterValues.length; i++) {
-      byte[] filterVal = filterValues[i];
-      if (dimensionColumnDataChunk.isExplicitSorted()) {
-        for (int index = 0; index < numerOfRows; index++) {
-          if (dimensionColumnDataChunk.compareTo(index, filterVal) == 0) {
-            bitSet.set(dimensionColumnDataChunk.getInvertedIndex(index));
-          }
-        }
-      } else {
-        for (int index = 0; index < numerOfRows; index++) {
-          if (dimensionColumnDataChunk.compareTo(index, filterVal) == 0) {
-            bitSet.set(index);
-          }
-        }
-      }
-    }
-    return bitSet;
-
-  }
-
   private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
+      DimensionColumnDataChunk dimensionColumnDataChunk, int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     int startIndex = 0;
     byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
     for (int i = 0; i < filterValues.length; i++) {
-      int[] rangeIndex = CarbonUtil.getRangeIndexUsingBinarySearch(dimensionColumnDataChunk,
-          startIndex, numerOfRows - 1, filterValues[i]);
+      if (startIndex >= numerOfRows) {
+        break;
+      }
+      int[] rangeIndex = CarbonUtil
+          .getRangeIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i]);
       for (int j = rangeIndex[0]; j <= rangeIndex[1]; j++) {
-
         bitSet.set(dimensionColumnDataChunk.getInvertedIndex(j));
       }
-
       if (rangeIndex[1] >= 0) {
-        startIndex = rangeIndex[1];
+        startIndex = rangeIndex[1] + 1;
       }
     }
     return bitSet;
@@ -136,8 +111,26 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
   private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    // binary search can only be applied if column is sorted and
+    // inverted index exists for that column
+    if (isNaturalSorted) {
+      int startIndex = 0;
+      for (int i = 0; i < filterValues.length; i++) {
+        if (startIndex >= numerOfRows) {
+          break;
+        }
+        int[] rangeIndex = CarbonUtil
+            .getRangeIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+                filterValues[i]);
+        for (int j = rangeIndex[0]; j <= rangeIndex[1]; j++) {
+          bitSet.set(j);
+        }
+        if (rangeIndex[1] >= 0) {
+          startIndex = rangeIndex[1] + 1;
+        }
+      }
+    } else {
       if (filterValues.length > 1) {
         for (int i = 0; i < numerOfRows; i++) {
           int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
@@ -146,10 +139,10 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
             bitSet.set(i);
           }
         }
-      } else if (filterValues.length == 1) {
-        for (int i = 0; i < numerOfRows; i++) {
-          if (dimensionColumnDataChunk.compareTo(i, filterValues[0]) == 0) {
-            bitSet.set(i);
+      } else {
+        for (int j = 0; j < numerOfRows; j++) {
+          if (dimensionColumnDataChunk.compareTo(j, filterValues[0]) == 0) {
+            bitSet.set(j);
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
index a20f414..6823531 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
@@ -24,7 +24,10 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.conditional.GreaterThanEqualToExpression;
@@ -48,8 +51,6 @@ import org.apache.carbondata.core.util.CarbonUtil;
 public class RangeValueFilterExecuterImpl extends ValueBasedFilterExecuterImpl {
 
   private DimColumnResolvedFilterInfo dimColEvaluatorInfo;
-  private MeasureColumnResolvedFilterInfo msrColEvalutorInfo;
-  private AbsoluteTableIdentifier tableIdentifier;
   private Expression exp;
   private byte[][] filterRangesValues;
   private SegmentProperties segmentProperties;
@@ -78,10 +79,8 @@ public class RangeValueFilterExecuterImpl extends ValueBasedFilterExecuterImpl {
       SegmentProperties segmentProperties) {
 
     this.dimColEvaluatorInfo = dimColEvaluatorInfo;
-    this.msrColEvalutorInfo = msrColEvaluatorInfo;
     this.exp = exp;
     this.segmentProperties = segmentProperties;
-    this.tableIdentifier = tableIdentifier;
     this.filterRangesValues = filterRangeValues;
     this.lessThanExp = isLessThan();
     this.lessThanEqualExp = isLessThanEqualTo();
@@ -242,7 +241,7 @@ public class RangeValueFilterExecuterImpl extends ValueBasedFilterExecuterImpl {
     //                       Block Min <-----------------------> Block Max
     //         Filter Min <-----------------------------------------------> Filter Max
 
-    if (isDimensionPresentInCurrentBlock == true) {
+    if (isDimensionPresentInCurrentBlock) {
       if (((lessThanExp == true) && (
           ByteUtil.UnsafeComparer.INSTANCE.compareTo(blockMinValue, filterValues[1]) >= 0)) || (
           (lessThanEqualExp == true) && (
@@ -474,80 +473,175 @@ public class RangeValueFilterExecuterImpl extends ValueBasedFilterExecuterImpl {
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     // if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-    int start = 0;
-    int startMin = 0;
-    int endMax = 0;
-    int startIndex = 0;
     byte[][] filterValues = this.filterRangesValues;
-    // For Range expression we expect two values. The First is the Min Value and Second is the
-    // Max value.
-    if (startBlockMinIsDefaultStart == false) {
-
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk,
-              startIndex, numerOfRows - 1, filterValues[0], greaterThanExp);
+    if (dimensionColumnDataChunk.isExplicitSorted()) {
+      int start = 0;
+      int startMin = 0;
+      int endMax = 0;
+      int startIndex = 0;
+      // For Range expression we expect two values. The First is the Min Value and Second is the
+      // Max value.
+      if (startBlockMinIsDefaultStart == false) {
 
-      if (greaterThanExp == true && start >= 0) {
         start = CarbonUtil
-            .nextGreaterValueToTarget(start, dimensionColumnDataChunk, filterValues[0],
-                numerOfRows);
-      }
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+                filterValues[0], greaterThanExp);
 
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
+        if (greaterThanExp == true && start >= 0) {
+          start = CarbonUtil
+              .nextGreaterValueToTarget(start, dimensionColumnDataChunk, filterValues[0],
+                  numerOfRows);
         }
-        // Method will compare the tentative index value after binary search, this tentative
-        // index needs to be compared by the filter member if its >= filter then from that
-        // index the bitset will be considered for filtering process.
-        if ((ByteUtil.compare(filterValues[0], dimensionColumnDataChunk.getChunkData(start)))
-            > 0) {
-          start = start + 1;
+
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search, this tentative
+          // index needs to be compared by the filter member if its >= filter then from that
+          // index the bitset will be considered for filtering process.
+          if ((ByteUtil.compare(filterValues[0], dimensionColumnDataChunk.getChunkData(start)))
+              > 0) {
+            start = start + 1;
+          }
         }
+        startMin = start;
+      } else {
+        startMin = startIndex;
       }
-      startMin = start;
-    } else {
-      startMin = startIndex;
-    }
-
-    if (endBlockMaxisDefaultEnd == false) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[1], lessThanEqualExp);
 
-      if (lessThanExp == true && start >= 0) {
-        start =
-            CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[1]);
-      }
+      if (endBlockMaxisDefaultEnd == false) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+                filterValues[1], lessThanEqualExp);
 
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
+        if (lessThanExp == true && start >= 0) {
+          start =
+              CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[1]);
         }
-        // In case the start is less than 0, then positive value of start is pointing to the next
-        // value of the searched key. So move to the previous one.
-        if ((ByteUtil.compare(filterValues[1], dimensionColumnDataChunk.getChunkData(start))
-            < 0)) {
-          start = start - 1;
+
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // In case the start is less than 0, then positive value of start is pointing to the next
+          // value of the searched key. So move to the previous one.
+          if ((ByteUtil.compare(filterValues[1], dimensionColumnDataChunk.getChunkData(start))
+              < 0)) {
+            start = start - 1;
+          }
         }
+        endMax = start;
+      } else {
+        endMax = numerOfRows - 1;
+      }
+
+      for (int j = startMin; j <= endMax; j++) {
+        bitSet.set(j);
+      }
+
+      // Binary Search cannot be done on '@NU#LL$!", so need to check and compare for null on
+      // matching row.
+      if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
+        updateForNoDictionaryColumn(startMin, endMax, dimensionColumnDataChunk, bitSet);
       }
-      endMax = start;
     } else {
-      endMax = numerOfRows - 1;
+      byte[] defaultValue = null;
+      if (dimColEvaluatorInfo.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+        DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+            .getDirectDictionaryGenerator(dimColEvaluatorInfo.getDimension().getDataType());
+        int key = directDictionaryGenerator.generateDirectSurrogateKey(null) + 1;
+        CarbonDimension currentBlockDimension =
+            segmentProperties.getDimensions().get(dimensionBlocksIndex);
+        defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
+            this.segmentProperties.getSortColumnsGenerator());
+      } else {
+        defaultValue = CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY;
+      }
+      // evaluate the result for the lower range value first and then AND it with the
+      // result for the upper range value in order to compute the final result
+      bitSet = evaluateGreaterThanFilterForUnsortedColumn(dimensionColumnDataChunk, filterValues[0],
+          numerOfRows);
+      BitSet upperRangeBitSet =
+          evaluateLessThanFilterForUnsortedColumn(dimensionColumnDataChunk, filterValues[1],
+              numerOfRows);
+      bitSet.and(upperRangeBitSet);
+      FilterUtil.removeNullValues(dimensionColumnDataChunk, bitSet, defaultValue);
     }
+    return bitSet;
+  }
 
-    for (int j = startMin; j <= endMax; j++) {
-      bitSet.set(j);
+  /**
+   * This method will compare the selected data against null values and
+   * flip the bitSet if any null value is found
+   *
+   * @param dimensionColumnDataChunk
+   * @param bitSet
+   */
+  private void removeNullValues(DimensionColumnDataChunk dimensionColumnDataChunk, BitSet bitSet) {
+    if (!bitSet.isEmpty()) {
+      for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
+        if (dimensionColumnDataChunk.compareTo(i, CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY)
+            == 0) {
+          bitSet.flip(i);
+        }
+      }
     }
+  }
 
-    // Binary Search cannot be done on '@NU#LL$!", so need to check and compare for null on
-    // matching row.
-    if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
-      updateForNoDictionaryColumn(startMin, endMax, dimensionColumnDataChunk, bitSet);
+  /**
+   * This method will evaluate the result for filter column based on the lower range value
+   *
+   * @param dimensionColumnDataChunk
+   * @param filterValue
+   * @param numberOfRows
+   * @return
+   */
+  private BitSet evaluateGreaterThanFilterForUnsortedColumn(
+      DimensionColumnDataChunk dimensionColumnDataChunk, byte[] filterValue, int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
+    if (greaterThanExp) {
+      for (int i = 0; i < numberOfRows; i++) {
+        if ((ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValue) > 0)) {
+          bitSet.set(i);
+        }
+      }
+    } else if (greaterThanEqualExp) {
+      for (int i = 0; i < numberOfRows; i++) {
+        if ((ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValue) >= 0)) {
+          bitSet.set(i);
+        }
+      }
     }
+    return bitSet;
+  }
 
+  /**
+   * This method will evaluate the result for filter column based on the upper range value
+   *
+   * @param dimensionColumnDataChunk
+   * @param filterValue
+   * @param numberOfRows
+   * @return
+   */
+  private BitSet evaluateLessThanFilterForUnsortedColumn(
+      DimensionColumnDataChunk dimensionColumnDataChunk, byte[] filterValue, int numberOfRows) {
+    BitSet bitSet = new BitSet(numberOfRows);
+    if (lessThanExp) {
+      for (int i = 0; i < numberOfRows; i++) {
+        if ((ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValue) < 0)) {
+          bitSet.set(i);
+        }
+      }
+    } else if (lessThanEqualExp) {
+      for (int i = 0; i < numberOfRows; i++) {
+        if ((ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValue) <= 0)) {
+          bitSet.set(i);
+        }
+      }
+    }
     return bitSet;
   }
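
For readers tracing the new unsorted-column path in the hunk above: the lower-bound and upper-bound predicates are evaluated independently into two bitsets, the two results are ANDed, and rows holding the null placeholder are cleared afterwards. A minimal standalone sketch of that strategy follows; the class, the method names and the byte comparison helper are illustrative stand-ins, not CarbonData APIs. The real implementation additionally honours the inclusive flags (greaterThanEqualExp, lessThanEqualExp) and compares values with ByteUtil.UnsafeComparer.

import java.util.Arrays;
import java.util.BitSet;

// Illustrative sketch of the unsorted-column range evaluation: build one bitset
// per bound, AND them, then clear rows equal to the null placeholder.
public class RangeScanSketch {

  static BitSet rangeFilter(byte[][] rows, byte[] lower, byte[] upper, byte[] nullValue) {
    BitSet result = greaterThan(rows, lower);   // lower bound first
    result.and(lessThan(rows, upper));          // then AND with the upper bound
    // null placeholders cannot take part in value comparisons, so drop them
    for (int i = result.nextSetBit(0); i >= 0; i = result.nextSetBit(i + 1)) {
      if (Arrays.equals(rows[i], nullValue)) {
        result.clear(i);
      }
    }
    return result;
  }

  static BitSet greaterThan(byte[][] rows, byte[] lower) {
    BitSet bits = new BitSet(rows.length);
    for (int i = 0; i < rows.length; i++) {
      if (compare(rows[i], lower) > 0) {
        bits.set(i);
      }
    }
    return bits;
  }

  static BitSet lessThan(byte[][] rows, byte[] upper) {
    BitSet bits = new BitSet(rows.length);
    for (int i = 0; i < rows.length; i++) {
      if (compare(rows[i], upper) < 0) {
        bits.set(i);
      }
    }
    return bits;
  }

  // unsigned lexicographic comparison, standing in for ByteUtil.compare
  static int compare(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int diff = (a[i] & 0xff) - (b[i] & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    return a.length - b.length;
  }
}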
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
index 470de89..a72d526 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
@@ -95,6 +95,11 @@ public class RowLevelFilterExecuterImpl implements FilterExecuter {
    */
   protected boolean[] isMeasurePresentInCurrentBlock;
 
+  /**
+   * flag to indicate whether the dimension column data is naturally sorted
+   */
+  protected boolean isNaturalSorted;
+
   public RowLevelFilterExecuterImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
index 6f8651a..be82be7 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.util.BitSet;
 import java.util.List;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -43,7 +44,6 @@ public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecute
    * flag to check whether default values is present in the filter value list
    */
   private boolean isDefaultValuePresentInFilter;
-
   public RowLevelRangeGrtThanFiterExecuterImpl(
       List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
@@ -52,6 +52,8 @@ public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecute
     super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
         null);
     this.filterRangeValues = filterRangeValues;
+    isNaturalSorted = dimColEvaluatorInfoList.get(0).getDimension().isUseInvertedIndex()
+        && dimColEvaluatorInfoList.get(0).getDimension().isSortColumn();
     ifDefaultValueMatchesFilter();
   }
 
@@ -150,10 +152,17 @@ public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecute
 
   private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
+    BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows);
+      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows);
+    } else {
+      bitSet = setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+    }
+    if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
+      FilterUtil.removeNullValues(dimensionColumnDataChunk, bitSet,
+          CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
     }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+    return bitSet;
   }
 
   /**
@@ -228,39 +237,50 @@ public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecute
   private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
     byte[][] filterValues = this.filterRangeValues;
-    for (int k = 0; k < filterValues.length; k++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[k], true);
-      if (start >= 0) {
+    // binary search can only be applied if column is sorted
+    if (isNaturalSorted) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      for (int k = 0; k < filterValues.length; k++) {
         start = CarbonUtil
-            .nextGreaterValueToTarget(start, dimensionColumnDataChunk, filterValues[k],
-                numerOfRows);
-      }
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, filterValues[k], true);
+        if (start >= 0) {
+          start = CarbonUtil
+              .nextGreaterValueToTarget(start, dimensionColumnDataChunk, filterValues[k],
+                  numerOfRows);
         }
-        // Method will compare the tentative index value after binary search, this tentative
-        // index needs to be compared by the filter member if its > filter then from that
-        // index the bitset will be considered for filtering process.
-        if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
-          start = start + 1;
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search, this tentative
+          // index needs to be compared by the filter member if its > filter then from that
+          // index the bitset will be considered for filtering process.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
+            start = start + 1;
+          }
+        }
+        last = start;
+        for (int j = start; j < numerOfRows; j++) {
+          bitSet.set(j);
+          last++;
+        }
+        startIndex = last;
+        if (startIndex >= numerOfRows) {
+          break;
         }
       }
-      last = start;
-      for (int j = start; j < numerOfRows; j++) {
-        bitSet.set(j);
-        last++;
-      }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
+    } else {
+      for (int k = 0; k < filterValues.length; k++) {
+        for (int i = 0; i < numerOfRows; i++) {
+          if (ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValues[k]) > 0) {
+            bitSet.set(i);
+          }
+        }
       }
     }
     return bitSet;
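
The branching introduced above in getFilteredIndexes and setFilterdIndexToBitSet amounts to a three-way choice: use the inverted index when the chunk is explicitly sorted, use binary search when the data itself is naturally sorted (the dimension uses an inverted index and is a sort column), and fall back to a row-by-row scan otherwise, removing null placeholders at the end for no-dictionary columns. A condensed sketch of that dispatch, using abstract placeholders rather than the real executor methods:

import java.util.BitSet;

// Condensed sketch of the filter dispatch added to the greater-than executor.
abstract class GreaterThanDispatchSketch {

  // true when the dimension uses an inverted index and is a sort column
  boolean isNaturalSorted;

  BitSet getFilteredIndexes(boolean explicitSorted, boolean noDictionary, int rowCount) {
    BitSet bits;
    if (explicitSorted) {
      bits = filterWithColumnIndex(rowCount);     // inverted-index path
    } else if (isNaturalSorted) {
      bits = filterWithBinarySearch(rowCount);    // data stored in sorted order
    } else {
      bits = filterWithLinearScan(rowCount);      // compare every row
    }
    if (noDictionary) {
      removeNullValues(bits);                     // drop '@NU#LL$!' placeholder rows
    }
    return bits;
  }

  abstract BitSet filterWithColumnIndex(int rowCount);
  abstract BitSet filterWithBinarySearch(int rowCount);
  abstract BitSet filterWithLinearScan(int rowCount);
  abstract void removeNullValues(BitSet bits);
}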

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
index fbc9b30..53da6c5 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.util.BitSet;
 import java.util.List;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -53,6 +54,8 @@ public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilte
     super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
         null);
     this.filterRangeValues = filterRangeValues;
+    isNaturalSorted = dimColEvaluatorInfoList.get(0).getDimension().isUseInvertedIndex()
+        && dimColEvaluatorInfoList.get(0).getDimension().isSortColumn();
     ifDefaultValueMatchesFilter();
   }
 
@@ -151,10 +154,17 @@ public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilte
 
   private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
+    BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows);
+      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows);
+    } else {
+      bitSet = setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+    }
+    if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
+      FilterUtil.removeNullValues(dimensionColumnDataChunk, bitSet,
+          CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
     }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+    return bitSet;
   }
 
   /**
@@ -218,35 +228,46 @@ public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilte
   private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
     byte[][] filterValues = this.filterRangeValues;
-    for (int k = 0; k < filterValues.length; k++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[k], false);
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
-        }
-        // Method will compare the tentative index value after binary search, this tentative
-        // index needs to be compared by the filter member if its >= filter then from that
-        // index the bitset will be considered for filtering process.
-        if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
-          start = start + 1;
+    // binary search can only be applied if column is sorted
+    if (isNaturalSorted) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, filterValues[k], false);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search, this tentative
+          // index needs to be compared by the filter member if its >= filter then from that
+          // index the bitset will be considered for filtering process.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
+            start = start + 1;
+          }
         }
-      }
 
-      last = start;
-      for (int j = start; j < numerOfRows; j++) {
-        bitSet.set(j);
-        last++;
+        last = start;
+        for (int j = start; j < numerOfRows; j++) {
+          bitSet.set(j);
+          last++;
+        }
+        startIndex = last;
+        if (startIndex >= numerOfRows) {
+          break;
+        }
       }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
+    } else {
+      for (int k = 0; k < filterValues.length; k++) {
+        for (int i = 0; i < numerOfRows; i++) {
+          if (ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValues[k]) >= 0) {
+            bitSet.set(i);
+          }
+        }
       }
     }
     return bitSet;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
index 99f5700..d694960 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.util.BitSet;
 import java.util.List;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -45,7 +46,6 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
    * flag to check whether default values is present in the filter value list
    */
   private boolean isDefaultValuePresentInFilter;
-
   public RowLevelRangeLessThanEqualFilterExecuterImpl(
       List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
@@ -55,6 +55,8 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
         null);
     this.filterRangeValues = filterRangeValues;
     ifDefaultValueMatchesFilter();
+    isNaturalSorted = dimColEvaluatorInfoList.get(0).getDimension().isUseInvertedIndex()
+        && dimColEvaluatorInfoList.get(0).getDimension().isSortColumn();
   }
 
   /**
@@ -153,13 +155,20 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
       CarbonDimension currentBlockDimension =
           segmentProperties.getDimensions().get(dimensionBlocksIndex[0]);
       defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
-          this.segmentProperties.getDimensionKeyGenerator());
+          this.segmentProperties.getSortColumnsGenerator());
     }
+    BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows,
+      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows,
           defaultValue);
+    } else {
+      bitSet = setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
     }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
+    if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
+      FilterUtil.removeNullValues(dimensionColumnDataChunk, bitSet,
+          CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
+    }
+    return bitSet;
   }
 
   /**
@@ -242,51 +251,62 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
   private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows, byte[] defaultValue) {
     BitSet bitSet = new BitSet(numerOfRows);
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
     byte[][] filterValues = this.filterRangeValues;
-    int skip = 0;
-    //find the number of default values to skip the null value in case of direct dictionary
-    if (null != defaultValue) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              defaultValue, true);
-      if (start < 0) {
-        skip = -(start + 1);
-        // end of block
-        if (skip == numerOfRows) {
-          return bitSet;
+    // binary search can only be applied if column is sorted
+    if (isNaturalSorted) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      int skip = 0;
+      //find the number of default values to skip the null value in case of direct dictionary
+      if (null != defaultValue) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, defaultValue, true);
+        if (start < 0) {
+          skip = -(start + 1);
+          // end of block
+          if (skip == numerOfRows) {
+            return bitSet;
+          }
+        } else {
+          skip = start;
         }
-      } else {
-        skip = start;
+        startIndex = skip;
       }
-      startIndex = skip;
-    }
-    for (int k = 0; k < filterValues.length; k++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[k], true);
-      if (start < 0) {
-        start = -(start + 1);
-        if (start >= numerOfRows) {
-          start = start - 1;
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, filterValues[k], true);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start >= numerOfRows) {
+            start = start - 1;
+          }
+          // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
+          // will be pointing to the next consecutive position. So compare it again and point to the
+          // previous value returned from getFirstIndexUsingBinarySearch.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
+            start = start - 1;
+          }
         }
-        // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
-        // will be pointing to the next consecutive position. So compare it again and point to the
-        // previous value returned from getFirstIndexUsingBinarySearch.
-        if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
-          start = start - 1;
+        last = start;
+        for (int j = start; j >= skip; j--) {
+          bitSet.set(j);
+          last--;
+        }
+        startIndex = last;
+        if (startIndex <= 0) {
+          break;
         }
       }
-      last = start;
-      for (int j = start; j >= skip; j--) {
-        bitSet.set(j);
-        last--;
-      }
-      startIndex = last;
-      if (startIndex <= 0) {
-        break;
+    } else {
+      for (int k = 0; k < filterValues.length; k++) {
+        for (int i = 0; i < numerOfRows; i++) {
+          if (ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValues[k]) <= 0) {
+            bitSet.set(i);
+          }
+        }
       }
     }
     return bitSet;
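
The skip handling above exists because direct-dictionary null entries sort ahead of real values in a naturally sorted block (the code derives the first valid key as generateDirectSurrogateKey(null) + 1), so a binary search for that first valid value tells how many leading rows to skip before marking the range. A simplified sketch using int surrogate keys instead of byte[] values; lowerBound and upperBound are illustrative helpers standing in for CarbonUtil.getFirstIndexUsingBinarySearch:

import java.util.BitSet;

// Sketch of the "skip leading nulls, then mark the <= range" logic on a
// naturally sorted column, using int surrogates for brevity.
public class LessThanEqualSketch {

  static BitSet lessThanEqual(int[] sortedKeys, int filterKey, int firstValidKey) {
    BitSet bits = new BitSet(sortedKeys.length);
    // 1. skip the leading null rows: nulls encode to the smallest surrogate
    int skip = lowerBound(sortedKeys, firstValidKey);
    if (skip == sortedKeys.length) {
      return bits;                                // block holds only nulls
    }
    // 2. find the last index whose key is <= filterKey
    int end = upperBound(sortedKeys, filterKey) - 1;
    // 3. mark every surviving row in [skip, end]
    for (int i = skip; i <= end; i++) {
      bits.set(i);
    }
    return bits;
  }

  // first index whose value is >= key
  static int lowerBound(int[] a, int key) {
    int lo = 0, hi = a.length;
    while (lo < hi) {
      int mid = (lo + hi) >>> 1;
      if (a[mid] < key) lo = mid + 1; else hi = mid;
    }
    return lo;
  }

  // first index whose value is > key
  static int upperBound(int[] a, int key) {
    int lo = 0, hi = a.length;
    while (lo < hi) {
      int mid = (lo + hi) >>> 1;
      if (a[mid] <= key) lo = mid + 1; else hi = mid;
    }
    return lo;
  }
}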

http://git-wip-us.apache.org/repos/asf/carbondata/blob/357ab636/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
index 5bdf315..b3dd921 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.util.BitSet;
 import java.util.List;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -55,6 +56,8 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
         null);
     this.filterRangeValues = filterRangeValues;
     ifDefaultValueMatchesFilter();
+    isNaturalSorted = dimColEvaluatorInfoList.get(0).getDimension().isUseInvertedIndex()
+        && dimColEvaluatorInfoList.get(0).getDimension().isSortColumn();
   }
 
   /**
@@ -153,13 +156,20 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
       CarbonDimension currentBlockDimension =
           segmentProperties.getDimensions().get(dimensionBlocksIndex[0]);
       defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
-          this.segmentProperties.getDimensionKeyGenerator());
+          this.segmentProperties.getSortColumnsGenerator());
     }
+    BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {
-      return setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows,
+      bitSet = setFilterdIndexToBitSetWithColumnIndex(dimensionColumnDataChunk, numerOfRows,
           defaultValue);
+    } else {
+      bitSet = setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
+    }
+    if (dimensionColumnDataChunk.isNoDicitionaryColumn()) {
+      FilterUtil.removeNullValues(dimensionColumnDataChunk, bitSet,
+          CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
     }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
+    return bitSet;
   }
 
   /**
@@ -251,56 +261,67 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
   private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows, byte[] defaultValue) {
     BitSet bitSet = new BitSet(numerOfRows);
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
-    int skip = 0;
     byte[][] filterValues = this.filterRangeValues;
-    //find the number of default values to skip the null value in case of direct dictionary
-    if (null != defaultValue) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              defaultValue, false);
-      if (start < 0) {
-        skip = -(start + 1);
-        // end of block
-        if (skip == numerOfRows) {
-          return bitSet;
+    // binary search can only be applied if column is sorted
+    if (isNaturalSorted) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      int skip = 0;
+      //find the number of default values to skip the null value in case of direct dictionary
+      if (null != defaultValue) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, defaultValue, false);
+        if (start < 0) {
+          skip = -(start + 1);
+          // end of block
+          if (skip == numerOfRows) {
+            return bitSet;
+          }
+        } else {
+          skip = start;
         }
-      } else {
-        skip = start;
+        startIndex = skip;
       }
-      startIndex = skip;
-    }
-    for (int k = 0; k < filterValues.length; k++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[k], false);
-      if (start >= 0) {
-        start =
-            CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[k]);
-      }
-      if (start < 0) {
-        start = -(start + 1);
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil
+            .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex,
+                numerOfRows - 1, filterValues[k], false);
+        if (start >= 0) {
+          start =
+              CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[k]);
+        }
+        if (start < 0) {
+          start = -(start + 1);
 
-        if (start >= numerOfRows) {
-          start = numerOfRows - 1;
+          if (start >= numerOfRows) {
+            start = numerOfRows - 1;
+          }
+          // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
+          // will be pointing to the next consecutive position. So compare it again and point to the
+          // previous value returned from getFirstIndexUsingBinarySearch.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
+            start = start - 1;
+          }
         }
-        // When negative value of start is returned from getFirstIndexUsingBinarySearch the Start
-        // will be pointing to the next consecutive position. So compare it again and point to the
-        // previous value returned from getFirstIndexUsingBinarySearch.
-        if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
-          start = start - 1;
+        last = start;
+        for (int j = start; j >= skip; j--) {
+          bitSet.set(j);
+          last--;
+        }
+        startIndex = last;
+        if (startIndex <= 0) {
+          break;
         }
       }
-      last = start;
-      for (int j = start; j >= skip; j--) {
-        bitSet.set(j);
-        last--;
-      }
-      startIndex = last;
-      if (startIndex <= 0) {
-        break;
+    } else {
+      for (int k = 0; k < filterValues.length; k++) {
+        for (int i = 0; i < numerOfRows; i++) {
+          if (ByteUtil.compare(dimensionColumnDataChunk.getChunkData(i), filterValues[k]) < 0) {
+            bitSet.set(i);
+          }
+        }
       }
     }
     return bitSet;
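
Several comments above describe adjusting the index when the binary search returns a negative value. Using java.util.Arrays.binarySearch semantics as a stand-in (a miss is encoded as -(insertionPoint) - 1, where the insertion point is the first element greater than the key), the adjustment for a "last index at most the key" lookup can be sketched as:

import java.util.Arrays;

// Sketch of turning a negative binary-search result into the boundary index
// used by the less-than / less-than-or-equal executors.
public class BoundaryAdjustSketch {

  // last index whose value is <= key, or -1 if no such element exists
  static int lastIndexAtMost(int[] sortedKeys, int key) {
    int pos = Arrays.binarySearch(sortedKeys, key);
    if (pos >= 0) {
      // key found: walk forward over duplicates to the last occurrence
      while (pos + 1 < sortedKeys.length && sortedKeys[pos + 1] == key) {
        pos++;
      }
      return pos;
    }
    // key absent: the insertion point is the first element greater than the
    // key, so the previous index is the last one still satisfying <= key
    int insertionPoint = -(pos + 1);
    return insertionPoint - 1;
  }
}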


[32/42] carbondata git commit: Fixed database cascade in spark 2.1 and alter table in vector mode.

Posted by ra...@apache.org.
Fixed database cascade in spark 2.1 and alter table in vector mode.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/809d8806
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/809d8806
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/809d8806

Branch: refs/heads/branch-1.1
Commit: 809d880684f68a43bb83eccf23c5409fe02bf15f
Parents: 105b7c3
Author: ravipesala <ra...@gmail.com>
Authored: Sat Jun 3 13:11:57 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:25:54 2017 +0530

----------------------------------------------------------------------
 ...tCreateTableWithDatabaseNameCaseChange.scala | 24 ++++++++++++++++++++
 .../spark/sql/test/SparkTestQueryExecutor.scala |  1 +
 .../vectorreader/ColumnarVectorWrapper.java     | 14 ++++++------
 .../execution/command/CarbonHiveCommands.scala  |  7 +++---
 4 files changed, 35 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
index 87aac94..5bf55f9 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
@@ -51,6 +51,30 @@ class TestCreateTableWithDatabaseNameCaseChange extends QueryTest with BeforeAnd
     }
   }
 
+  test("test drop database cascade with case sensitive") {
+    // This test verifies table creation when the database name differs only in case.
+    // In Hive the dbName folder is always created in lower case on HDFS and Carbon should
+    // behave the same way. If the table creation fails on the second attempt, it means
+    // separate folders were created on HDFS for each case variant used in the commands.
+    sql("drop database if exists AbCdEf cascade")
+    sql("create database AbCdEf")
+    sql("use AbCdEf")
+    sql("create table carbonTable(a int, b string)stored by 'carbondata'")
+    sql("use default")
+    sql("drop database if exists AbCdEf cascade")
+    sql("create database AbCdEf")
+    sql("use AbCdEf")
+    try {
+      sql("create table carbonTable(a int, b string)stored by 'carbondata'")
+      assert(true)
+    } catch {
+      case ex: Exception =>
+        assert(false)
+    }
+    sql("use default")
+    sql("drop database if exists AbCdEf cascade")
+  }
+
   override def afterAll {
     sql("use default")
     sql("drop database if exists dbCaseChange cascade")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
index 591cdf4..27df623 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
@@ -42,6 +42,7 @@ object SparkTestQueryExecutor {
     .addProperty(CarbonCommonConstants.STORE_LOCATION_TEMP_PATH,
       System.getProperty("java.io.tmpdir"))
     .addProperty(CarbonCommonConstants.LOCK_TYPE, CarbonCommonConstants.CARBON_LOCK_TYPE_LOCAL)
+    .addProperty(CarbonCommonConstants.STORE_LOCATION, TestQueryExecutor.storeLocation)
 
   val sc = new SparkContext(new SparkConf()
     .setAppName("CarbonSpark")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
index c3d2a87..5ab741b 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
@@ -60,7 +60,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putShort(counter++, value);
+          columnVector.putShort(counter++, value);
         }
         rowId++;
       }
@@ -79,7 +79,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putInt(counter++, value);
+          columnVector.putInt(counter++, value);
         }
         rowId++;
       }
@@ -98,7 +98,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putLong(counter++, value);
+          columnVector.putLong(counter++, value);
         }
         rowId++;
       }
@@ -116,7 +116,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
   @Override public void putDecimals(int rowId, int count, Decimal value, int precision) {
     for (int i = 0; i < count; i++) {
       if (!filteredRows[rowId]) {
-        putDecimal(counter++, value, precision);
+        columnVector.putDecimal(counter++, value, precision);
       }
       rowId++;
     }
@@ -132,7 +132,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putDouble(counter++, value);
+          columnVector.putDouble(counter++, value);
         }
         rowId++;
       }
@@ -150,7 +150,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
   @Override public void putBytes(int rowId, int count, byte[] value) {
     for (int i = 0; i < count; i++) {
       if (!filteredRows[rowId]) {
-        putBytes(counter++, value);
+        columnVector.putByteArray(counter++, value);
       }
       rowId++;
     }
@@ -172,7 +172,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putNull(counter++);
+          columnVector.putNull(counter++);
         }
         rowId++;
       }
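
The ColumnarVectorWrapper change above fixes batch put methods that were calling the wrapper's own single-value methods (for example putShort(counter++, value)) instead of writing to the wrapped Spark vector; the fix delegates to columnVector directly while still skipping filtered rows. A minimal sketch of the intended delegation pattern, with simplified stand-in interfaces rather than the Spark/CarbonData classes:

// Minimal sketch of the delegation restored by this fix: expand a repeated
// value into the wrapped vector, dropping rows that were filtered out.
interface SimpleVector {
  void putInt(int rowId, int value);
}

class FilteringVectorWrapperSketch {
  private final SimpleVector columnVector;   // the real wrapped batch column
  private final boolean[] filteredRows;      // rows removed by filter pushdown
  private int counter;                       // next write position in the output

  FilteringVectorWrapperSketch(SimpleVector columnVector, boolean[] filteredRows) {
    this.columnVector = columnVector;
    this.filteredRows = filteredRows;
  }

  // write "value" for "count" logical rows starting at rowId
  void putInts(int rowId, int count, int value) {
    for (int i = 0; i < count; i++) {
      if (!filteredRows[rowId]) {
        columnVector.putInt(counter++, value);   // delegate to the wrapped vector
      }
      rowId++;
    }
  }
}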

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
index 2786620..b72f077 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
@@ -31,13 +31,12 @@ case class CarbonDropDatabaseCommand(command: DropDatabaseCommand)
     val rows = command.run(sparkSession)
     if (command.cascade) {
       val tablesInDB = CarbonEnv.getInstance(sparkSession).carbonMetastore.getAllTables()
-        .filterNot(_.database.exists(_.equalsIgnoreCase(dbName)))
+        .filter(_.database.exists(_.equalsIgnoreCase(dbName)))
       tablesInDB.foreach { tableName =>
-        CarbonDropTableCommand(true, Some(dbName), tableName.table).run(sparkSession)
+        CarbonDropTableCommand(true, tableName.database, tableName.table).run(sparkSession)
       }
     }
-    CarbonEnv.getInstance(sparkSession).carbonMetastore.dropDatabaseDirectory(dbName)
+    CarbonEnv.getInstance(sparkSession).carbonMetastore.dropDatabaseDirectory(dbName.toLowerCase)
     rows
   }
 }
-


[10/42] carbondata git commit: [CARBONDATA-989] decompress error while loading 'gz' and 'bz2' data into a table

Posted by ra...@apache.org.
[CARBONDATA-989] decompress error while loading 'gz' and 'bz2' data into a table


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/59d55454
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/59d55454
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/59d55454

Branch: refs/heads/branch-1.1
Commit: 59d55454c2bd606bb59c6ea9997191fa38480916
Parents: 9e913e0
Author: ranmx <ra...@fosun.com>
Authored: Thu Apr 27 16:17:33 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:45:27 2017 +0530

----------------------------------------------------------------------
 .../core/datastore/impl/FileFactory.java        |  12 +-
 .../core/datastore/CompressdFileTest.java       | 120 +++++++++++++++++++
 2 files changed, 129 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/59d55454/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
index c32b956..de78a3f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.compress.BZip2Codec;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.GzipCodec;
 
 public final class FileFactory {
@@ -131,11 +133,15 @@ public final class FileFactory {
         } else {
           stream = fs.open(pt, bufferSize);
         }
+        String codecName = null;
         if (gzip) {
-          GzipCodec codec = new GzipCodec();
-          stream = codec.createInputStream(stream);
+          codecName = GzipCodec.class.getName();
         } else if (bzip2) {
-          BZip2Codec codec = new BZip2Codec();
+          codecName = BZip2Codec.class.getName();
+        }
+        if (null != codecName) {
+          CompressionCodecFactory ccf = new CompressionCodecFactory(configuration);
+          CompressionCodec codec = ccf.getCodecByClassName(codecName);
           stream = codec.createInputStream(stream);
         }
         break;
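
The FileFactory change above swaps directly instantiated GzipCodec/BZip2Codec objects for codecs obtained from Hadoop's CompressionCodecFactory, which returns codecs initialized with the Hadoop Configuration; a codec created with new is never configured, which is the likely source of the decompress error. A condensed sketch of the same wiring (a simplified helper, not the FileFactory API):

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.GzipCodec;

// Sketch: open an HDFS file and, for .gz/.bz2 suffixes, wrap the stream with a
// codec obtained from CompressionCodecFactory so it carries the Configuration.
public class CompressedStreamSketch {

  static DataInputStream open(String filePath, Configuration conf) throws IOException {
    Path path = new Path(filePath);
    FileSystem fs = path.getFileSystem(conf);
    InputStream stream = fs.open(path);
    String codecName = null;
    if (filePath.endsWith(".gz")) {
      codecName = GzipCodec.class.getName();
    } else if (filePath.endsWith(".bz2")) {
      codecName = BZip2Codec.class.getName();
    }
    if (codecName != null) {
      CompressionCodecFactory factory = new CompressionCodecFactory(conf);
      CompressionCodec codec = factory.getCodecByClassName(codecName);
      stream = codec.createInputStream(stream);   // configured decompressor
    }
    return new DataInputStream(stream);
  }
}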

http://git-wip-us.apache.org/repos/asf/carbondata/blob/59d55454/core/src/test/java/org/apache/carbondata/core/datastore/CompressdFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/CompressdFileTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/CompressdFileTest.java
new file mode 100644
index 0000000..14aec44
--- /dev/null
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/CompressdFileTest.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.datastore;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.datastore.impl.FileFactory.*;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+import java.io.DataInputStream;
+import java.io.FileOutputStream;
+import java.io.File;
+import java.io.Writer;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.nio.charset.Charset;
+import java.util.zip.GZIPOutputStream;
+
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
+import static junit.framework.TestCase.assertTrue;
+
+
+public class CompressdFileTest
+{
+  @BeforeClass public static void setUp() throws Exception  {
+    String path = "../core/src/test/resources/testFile";
+    String content = "hello world";
+
+    makeGzipFile(path, content);
+    makeBzip2File(path, content);
+
+  }
+
+  private static void makeGzipFile (String path, String content) throws Exception {
+    path = path + ".gz";
+    FileOutputStream output = new FileOutputStream(path);
+    try {
+      Writer writer = new OutputStreamWriter(new GZIPOutputStream(output),
+          "UTF-8");
+      try {
+        writer.write(content);
+      } finally {
+        writer.close();
+      }
+    } finally {
+      output.close();
+    }
+  }
+
+  private static void makeBzip2File (String path, String content) throws Exception {
+    path = path + ".bz2";
+    FileOutputStream output = new FileOutputStream(path);
+    try {
+      Writer writer = new OutputStreamWriter(new BZip2CompressorOutputStream(output),
+          "UTF-8");
+      try {
+        writer.write(content);
+      } finally {
+        writer.close();
+      }
+    } finally {
+      output.close();
+    }
+  }
+
+  @Test public void testReadGzFile() throws Exception {
+    assertTrue(readCompressed("../core/src/test/resources/testFile.gz").equals("hello world"));
+  }
+
+  @Test public void testReadBzip2File() throws Exception {
+    assertTrue(readCompressed("../core/src/test/resources/testFile.bz2").equals("hello world"));
+  }
+
+  private static String readCompressed(String path) throws Exception {
+      DataInputStream fileReader = null;
+      BufferedReader bufferedReader = null;
+      String readLine = null;
+
+      try {
+        fileReader =
+            FileFactory.getDataInputStream(path, FileType.HDFS);
+        bufferedReader = new BufferedReader(new InputStreamReader(fileReader,
+            Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
+        readLine = bufferedReader.readLine();
+      } finally {
+        if (null != fileReader) {
+          fileReader.close();
+        }
+
+        if (null != bufferedReader) {
+          bufferedReader.close();
+        }
+      }
+      return readLine;
+  }
+
+  @AfterClass public static void testCleanUp() {
+    new File("../core/src/test/resources/testFile.gz").deleteOnExit();
+    new File("../core/src/test/resources/testFile.bz2").deleteOnExit();
+  }
+}


[29/42] carbondata git commit: condition for single pass

Posted by ra...@apache.org.
condition for single pass


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bbcc487a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bbcc487a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bbcc487a

Branch: refs/heads/branch-1.1
Commit: bbcc487a423d1ba42efd1926457fe27d763c00af
Parents: 917152a
Author: sgururajshetty <sg...@gmail.com>
Authored: Tue May 30 16:42:32 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:17:34 2017 +0530

----------------------------------------------------------------------
 docs/configuration-parameters.md    | 2 +-
 docs/dml-operation-on-carbondata.md | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbcc487a/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index c63f73d..b71cdbc 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -130,7 +130,7 @@ This section provides the details of all the configurations required for CarbonD
   
 | Parameter | Default Value | Description |
 |---------------------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| high.cardinality.identify.enable | true | If the parameter is true, the high cardinality columns of the dictionary code are automatically recognized and these columns will not be used as global dictionary encoding. If the parameter is false, all dictionary encoding columns are used as dictionary encoding. The high cardinality column must meet the following requirements: value of cardinality > configured value of high.cardinality. Equally, the value of cardinality is higher than the threshold.value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentage. Equally, the ratio of the cardinality value to data row number is higher than the configured percentage. |
+| high.cardinality.identify.enable | true | If the parameter is true, the high cardinality columns of the dictionary code are automatically recognized and these columns will not be used as global dictionary encoding. If the parameter is false, all dictionary encoding columns are used as dictionary encoding. The high cardinality column must meet the following requirements: value of cardinality > configured value of high.cardinality. Equally, the value of cardinality is higher than the threshold.value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentage. Equally, the ratio of the cardinality value to data row number is higher than the configured percentage. Note: If SINGLE_PASS is used during data load, then this property will be disabled.|
 | high.cardinality.threshold | 1000000  | It is a threshold to identify high cardinality of the columns.If the value of columns' cardinality > the configured value, then the columns are excluded from dictionary encoding. |
 | high.cardinality.row.count.percentage | 80 | Percentage to identify whether column cardinality is more than configured percent of total row count.Configuration value formula:Value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentage. The value of the parameter must be larger than 0. |
 | carbon.cutOffTimestamp | 1970-01-01 05:30:00 | Sets the start date for calculating the timestamp. Java counts the number of milliseconds from start of "1970-01-01 00:00:00". This property is used to customize the start of position. For example "2000-01-01 00:00:00". The date must be in the form "carbon.timestamp.format". NOTE: The CarbonData supports data store up to 68 years from the cut-off time defined. For example, if the cut-off time is 1970-01-01 05:30:00, then the data can be stored up to 2038-01-01 05:30:00. |

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbcc487a/docs/dml-operation-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/dml-operation-on-carbondata.md b/docs/dml-operation-on-carbondata.md
index e315468..45f7464 100644
--- a/docs/dml-operation-on-carbondata.md
+++ b/docs/dml-operation-on-carbondata.md
@@ -146,6 +146,9 @@ You can use the following options to load data:
    * If this option is set to TRUE then data loading will take less time.
 
    * If this option is set to some invalid value other than TRUE or FALSE then it uses the default value.
+   
+   * If this option is set to TRUE, then the high.cardinality.identify.enable property will be disabled during data load.
+   
 ### Example:
 
 ```


[09/42] carbondata git commit: Spark 2x tupleId support for IUD Feature

Posted by ra...@apache.org.
Spark 2x tupleId support for IUD Feature


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9e913e00
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9e913e00
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9e913e00

Branch: refs/heads/branch-1.1
Commit: 9e913e006ff06b6e16e92e0bb26504ab49f44cdc
Parents: 49e8b00
Author: nareshpr <pr...@gmail.com>
Authored: Mon May 15 13:10:08 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:45:15 2017 +0530

----------------------------------------------------------------------
 .../scala/org/apache/spark/sql/CarbonEnv.scala  |  1 +
 .../execution/CarbonLateDecodeStrategy.scala    | 16 +++++++--
 .../sql/optimizer/CarbonLateDecodeRule.scala    | 36 ++++++++++++++++++--
 .../carbondata/query/SubQueryTestSuite.scala    |  4 +++
 4 files changed, 52 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9e913e00/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index a286e56..b46488c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -43,6 +43,7 @@ class CarbonEnv {
   var initialized = false
 
   def init(sparkSession: SparkSession): Unit = {
+    sparkSession.udf.register("getTupleId", () => "")
     if (!initialized) {
       carbonMetastore = {
         val storePath =

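The registered UDF is only a placeholder (it returns an empty string); the strategy and rule changes in the next two files rewrite any aliased use of it into the implicit tuple-id column. A minimal sketch of the user-level effect, assuming the carbon table subquery from the test added at the end of this commit:

    // Resolves once CarbonEnv.init(sparkSession) has registered getTupleId.
    val df = sparkSession.sql("SELECT getTupleId() AS tupleId FROM subquery")
    // After the late-decode rewrite the values look like "0/0/0-0_batchno0-0-..."
    // instead of the empty string the placeholder UDF would return.
    df.show(false)
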
http://git-wip-us.apache.org/repos/asf/carbondata/blob/9e913e00/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
index 346e105..ac43a12 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
@@ -239,9 +239,20 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
         updateRequestedColumns.asInstanceOf[Seq[Attribute]])
       filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan)
     } else {
+
+      var newProjectList: Seq[Attribute] = Seq.empty
+      val updatedProjects = projects.map {
+          case a@Alias(s: ScalaUDF, name)
+            if name.equalsIgnoreCase(CarbonCommonConstants.POSITION_ID) ||
+                name.equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID) =>
+            val reference = AttributeReference(name, StringType, true)().withExprId(a.exprId)
+            newProjectList :+= reference
+            reference
+          case other => other
+      }
       // Don't request columns that are only referenced by pushed filters.
       val requestedColumns =
-      (projectSet ++ filterSet -- handledSet).map(relation.attributeMap).toSeq
+      (projectSet ++ filterSet -- handledSet).map(relation.attributeMap).toSeq ++ newProjectList
       val updateRequestedColumns = updateRequestedColumnsFunc(requestedColumns, table, needDecoder)
       val scan = getDataSourceScan(relation,
         updateRequestedColumns.asInstanceOf[Seq[Attribute]],
@@ -252,7 +263,8 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
         needDecoder,
         updateRequestedColumns.asInstanceOf[Seq[Attribute]])
       execution.ProjectExec(
-        updateRequestedColumnsFunc(projects, table, needDecoder).asInstanceOf[Seq[NamedExpression]],
+        updateRequestedColumnsFunc(updatedProjects, table,
+          needDecoder).asInstanceOf[Seq[NamedExpression]],
         filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan))
     }
   }

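The change above replaces each projection of the shape Alias(ScalaUDF, name), where the name is the implicit tupleId/positionId column, with a plain AttributeReference, and collects the generated references so they are also requested from the scan. A simplified, self-contained sketch of that replace-and-collect pattern, using hypothetical case classes rather than the Catalyst expression API:

    sealed trait Expr
    case class Attr(name: String) extends Expr
    case class AliasedUdf(name: String) extends Expr

    val implicitCols = Set("tupleId", "positionId")
    val projects: Seq[Expr] = Seq(AliasedUdf("tupleId"), Attr("c1"))

    var newProjectList: Seq[Attr] = Seq.empty
    val updatedProjects = projects.map {
      case AliasedUdf(name) if implicitCols.exists(_.equalsIgnoreCase(name)) =>
        val ref = Attr(name)      // stand-in for AttributeReference(name, StringType)
        newProjectList :+= ref    // the scan must also produce the implicit column
        ref
      case other => other
    }
    // updatedProjects == Seq(Attr("tupleId"), Attr("c1")), newProjectList == Seq(Attr("tupleId"))
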
http://git-wip-us.apache.org/repos/asf/carbondata/blob/9e913e00/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index aff34ea..fd6f14e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -20,7 +20,6 @@ package org.apache.spark.sql.optimizer
 import java.util
 
 import scala.collection.JavaConverters._
-import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
 import org.apache.spark.sql._
@@ -30,9 +29,10 @@ import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.execution.command.RunnableCommand
 import org.apache.spark.sql.execution.datasources.LogicalRelation
-import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
+import org.apache.spark.sql.types.{IntegerType, StringType}
 
 import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.stats.QueryStatistic
 import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory
 import org.apache.carbondata.spark.{CarbonAliasDecoderRelation, CarbonFilters}
@@ -69,9 +69,10 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
         return plan
       }
       LOGGER.info("Starting to optimize plan")
+      val udfTransformedPlan = pushDownUDFToJoinLeftRelation(plan)
       val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder("")
       val queryStatistic = new QueryStatistic()
-      val result = transformCarbonPlan(plan, relations)
+      val result = transformCarbonPlan(udfTransformedPlan, relations)
       queryStatistic.addStatistics("Time taken for Carbon Optimizer to optimize: ",
         System.currentTimeMillis)
       recorder.recordStatistics(queryStatistic)
@@ -83,6 +84,35 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
     }
   }
 
+  private def pushDownUDFToJoinLeftRelation(plan: LogicalPlan): LogicalPlan = {
+    val output = plan match {
+      case proj@Project(cols, Join(
+      left, right, jointype: org.apache.spark.sql.catalyst.plans.JoinType, condition)) =>
+        var projectionToBeAdded: Seq[org.apache.spark.sql.catalyst.expressions.Alias] = Seq.empty
+        val newCols = cols.map { col =>
+          col match {
+            case a@Alias(s: ScalaUDF, name)
+              if (name.equalsIgnoreCase(CarbonCommonConstants.POSITION_ID) ||
+                  name.equalsIgnoreCase(
+                    CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID)) =>
+              projectionToBeAdded :+= a
+              AttributeReference(name, StringType, true)().withExprId(a.exprId)
+            case other => other
+          }
+        }
+        val newLeft = left match {
+          case Project(columns, logicalPlan) =>
+            Project(columns ++ projectionToBeAdded, logicalPlan)
+          case filter: Filter =>
+            Project(filter.output ++ projectionToBeAdded, filter)
+          case other => other
+        }
+        Project(newCols, Join(newLeft, right, jointype, condition))
+      case other => other
+    }
+    output
+  }
+
   def isOptimized(plan: LogicalPlan): Boolean = {
     plan find {
       case cd: CarbonDictionaryCatalystDecoder => true

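pushDownUDFToJoinLeftRelation targets the plans produced by update-by-sub-query statements: such a statement is planned as a Project over a Join, and the aliased tuple-id/position-id projection has to be evaluated on the left (target carbon table) side so that each row to rewrite can be located. The shape it handles is the kind of statement the IUD test suites later in this series run, for example (QueryTest style, table names taken from those suites):

    // Planned as Project over Join(dest2, source2); the rule keeps the implicit
    // tuple-id projection on the dest2 side and the outer project references it.
    sql("""update dest2 d set (d.c3, d.c5) = (select s.c33, s.c55 from source2 s where d.c1 = s.c11)""").show()
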
http://git-wip-us.apache.org/repos/asf/carbondata/blob/9e913e00/integration/spark2/src/test/scala/org/apache/spark/carbondata/query/SubQueryTestSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/query/SubQueryTestSuite.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/query/SubQueryTestSuite.scala
index f6ad961..ad56173 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/query/SubQueryTestSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/query/SubQueryTestSuite.scala
@@ -56,6 +56,10 @@ class SubQueryTestSuite extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists anothertable")
   }
 
+  test("tupleId") {
+    checkExistence(sql("select getTupleId() as tupleId from subquery"), true, "0/0/0-0_batchno0-0-")
+  }
+
   override def afterAll() {
     sql("drop table if exists subquery")
   }


[38/42] carbondata git commit: Fixed Synchronization issue and improve IUD performance

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/processing/src/test/java/org/apache/carbondata/carbon/datastore/BlockIndexStoreTest.java
----------------------------------------------------------------------
diff --git a/processing/src/test/java/org/apache/carbondata/carbon/datastore/BlockIndexStoreTest.java b/processing/src/test/java/org/apache/carbondata/carbon/datastore/BlockIndexStoreTest.java
index 23f7230..d5a4f02 100644
--- a/processing/src/test/java/org/apache/carbondata/carbon/datastore/BlockIndexStoreTest.java
+++ b/processing/src/test/java/org/apache/carbondata/carbon/datastore/BlockIndexStoreTest.java
@@ -81,7 +81,7 @@ public class BlockIndexStoreTest extends TestCase {
     File file = getPartFile();
     TableBlockInfo info =
         new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     CarbonTableIdentifier carbonTableIdentifier =
             new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");
     AbsoluteTableIdentifier absoluteTableIdentifier =
@@ -116,20 +116,20 @@ public class BlockIndexStoreTest extends TestCase {
     File file = getPartFile();
     TableBlockInfo info =
         new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info1 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     TableBlockInfo info2 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info3 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info4 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     CarbonTableIdentifier carbonTableIdentifier =
             new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");
@@ -176,31 +176,31 @@ public class BlockIndexStoreTest extends TestCase {
     File file = getPartFile();
     TableBlockInfo info =
         new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info1 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     TableBlockInfo info2 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info3 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
     TableBlockInfo info4 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "1", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     TableBlockInfo info5 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "2", new String[] { "loclhost" },
-            file.length(),ColumnarFormatVersion.V1);
+            file.length(),ColumnarFormatVersion.V1, null);
     TableBlockInfo info6 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "2", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     TableBlockInfo info7 =
         new TableBlockInfo(file.getAbsolutePath(), 0, "3", new String[] { "loclhost" },
-            file.length(), ColumnarFormatVersion.V1);
+            file.length(), ColumnarFormatVersion.V1, null);
 
     CarbonTableIdentifier carbonTableIdentifier =
             new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");


[22/42] carbondata git commit: Fixed all testcases of IUD in spark 2.1

Posted by ra...@apache.org.
Fixed all testcases of IUD in spark 2.1

Fixed style and review comments


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9d16d504
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9d16d504
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9d16d504

Branch: refs/heads/branch-1.1
Commit: 9d16d504ad0a6746da82f421f2e2eec9a313a8e5
Parents: b202697
Author: ravipesala <ra...@gmail.com>
Authored: Mon May 29 12:24:15 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:06:12 2017 +0530

----------------------------------------------------------------------
 .../iud/DeleteCarbonTableTestCase.scala         |  77 ++-
 .../iud/HorizontalCompactionTestCase.scala      | 366 ++++++++++
 .../iud/UpdateCarbonTableTestCase.scala         | 680 ++++++++++---------
 .../iud/DeleteCarbonTableTestCase.scala         | 130 ----
 .../testsuite/iud/IUDCompactionTestCases.scala  | 361 ----------
 .../iud/UpdateCarbonTableTestCase.scala         | 393 -----------
 .../spark/rdd/CarbonDataRDDFactory.scala        | 139 +++-
 .../spark/sql/CarbonCatalystOperators.scala     |   2 +-
 .../sql/execution/command/IUDCommands.scala     |  14 +-
 .../spark/sql/hive/CarbonAnalysisRules.scala    |  53 +-
 .../sql/optimizer/CarbonLateDecodeRule.scala    |  11 +-
 .../sql/parser/CarbonSpark2SqlParser.scala      |   2 +-
 .../store/writer/AbstractFactDataWriter.java    |   2 +-
 13 files changed, 921 insertions(+), 1309 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 33ae0d3..0346067 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -20,21 +20,24 @@ import org.apache.spark.sql.Row
 import org.apache.spark.sql.common.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
 class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   override def beforeAll {
-
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "false")
     sql("use default")
     sql("drop database  if exists iud_db cascade")
     sql("create database  iud_db")
 
     sql("""create table iud_db.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/source2.csv' INTO table iud_db.source2""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv' INTO table iud_db.source2""")
     sql("use iud_db")
   }
   test("delete data from carbon table with alias [where clause ]") {
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-//    sql(s"""select getTupleId() as tupleId from dest """).show
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
     sql("""delete from iud_db.dest d where d.c1 = 'a'""").show
     checkAnswer(
       sql("""select c2 from iud_db.dest"""),
@@ -44,18 +47,18 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   test("delete data from  carbon table[where clause ]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from iud_db.dest e where e.c2 = 2""").show
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from iud_db.dest where c2 = 2""").show
     checkAnswer(
-      sql("""select c1 from dest"""),
+      sql("""select c1 from iud_db.dest"""),
       Seq(Row("a"), Row("c"), Row("d"), Row("e"))
     )
   }
   test("delete data from  carbon table[where IN  ]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from iud_db.dest where c1 IN ('d', 'e')""").show
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from dest where c1 IN ('d', 'e')""").show
     checkAnswer(
       sql("""select c1 from dest"""),
       Seq(Row("a"), Row("b"),Row("c"))
@@ -65,7 +68,7 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   test("delete data from  carbon table[with alias No where clause]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
     sql("""delete from iud_db.dest a""").show
     checkAnswer(
       sql("""select c1 from iud_db.dest"""),
@@ -75,7 +78,7 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   test("delete data from  carbon table[No alias No where clause]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
     sql("""delete from dest""").show()
     checkAnswer(
       sql("""select c1 from dest"""),
@@ -86,7 +89,7 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   test("delete data from  carbon table[ JOIN with another table ]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
     sql(""" DELETE FROM dest t1 INNER JOIN source2 t2 ON t1.c1 = t2.c11""").show(truncate = false)
     checkAnswer(
       sql("""select c1 from iud_db.dest"""),
@@ -94,38 +97,40 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     )
   }
 
-  test("delete data from  carbon table[where IN (sub query) ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
-    sql("""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2)""").show(truncate = false)
-    checkAnswer(
-      sql("""select c1 from iud_db.dest"""),
-      Seq(Row("c"), Row("d"), Row("e"))
-    )
-  }
-  test("delete data from  carbon table[where IN (sub query with where clause) ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
-    sql("""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2 where c11 = 'b')""").show()
-    checkAnswer(
-      sql("""select c1 from iud_db.dest"""),
-      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
-    )
-  }
+//  test("delete data from  carbon table[where IN (sub query) ]") {
+//    sql("""drop table if exists iud_db.dest""")
+//    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
+//    sql("""LOAD DATA LOCAL INPATH './src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+//    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2)""").show(truncate = false)
+//    checkAnswer(
+//      sql("""select c1 from iud_db.dest"""),
+//      Seq(Row("c"), Row("d"), Row("e"))
+//    )
+//  }
+//  test("delete data from  carbon table[where IN (sub query with where clause) ]") {
+//    sql("""drop table if exists iud_db.dest""")
+//    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
+//    sql("""LOAD DATA LOCAL INPATH './src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+//    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2 where c11 = 'b')""").show()
+//    checkAnswer(
+//      sql("""select c1 from iud_db.dest"""),
+//      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
+//    )
+//  }
   test("delete data from  carbon table[where numeric condition  ]") {
     sql("""drop table if exists iud_db.dest""")
     sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from iud_db.dest where c2 >= 4""").show()
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from  iud_db.dest where c2 >= 4""").show()
     checkAnswer(
       sql("""select count(*) from iud_db.dest"""),
       Seq(Row(3))
     )
   }
   override def afterAll {
-  //  sql("use default")
-  //  sql("drop database  if exists iud_db cascade")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
+    sql("use default")
+    sql("drop database  if exists iud_db cascade")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
new file mode 100644
index 0000000..9c3b261
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/HorizontalCompactionTestCase.scala
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.iud
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+
+class HorizontalCompactionTestCase extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "false")
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table iud4.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/comp1.csv' INTO table iud4.dest""")
+    sql(
+      """create table iud4.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table iud4.source2""")
+    sql("""create table iud4.other (c1 string,c2 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/other.csv' INTO table iud4.other""")
+    sql(
+      """create table iud4.hdest (c1 string,c2 int,c3 string,c5 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' STORED AS TEXTFILE""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/comp1.csv' INTO table iud4.hdest""")
+    sql(
+      """CREATE TABLE iud4.update_01(imei string,age int,task bigint,num double,level decimal(10,3),name string)STORED BY 'org.apache.carbondata.format' """)
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud4.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled, "true")
+  }
+
+
+
+  test("test IUD Horizontal Compaction Update Alter Clean") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql(
+      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
+    sql(
+      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""")
+      .show()
+    sql(
+      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""")
+      .show()
+    sql(
+      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""")
+      .show()
+    sql("""alter table dest2 compact 'minor'""")
+    sql("""clean files for table dest2""")
+    checkAnswer(
+      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
+      Seq(Row("a", 1, "MGM", "Disco"),
+        Row("b", 2, "RGK", "Music"),
+        Row("c", 3, "cc", "ccc"),
+        Row("d", 4, "YDY", "Weather"),
+        Row("e", 5, "ee", "eee"),
+        Row("f", 6, "ff", "fff"),
+        Row("g", 7, "YTY", "Hello"),
+        Row("h", 8, "hh", "hhh"),
+        Row("i", 9, "ii", "iii"),
+        Row("j", 10, "jj", "jjj"),
+        Row("a", 11, "MGM", "Disco"),
+        Row("b", 12, "RGK", "Music"),
+        Row("c", 13, "cc", "ccc"),
+        Row("d", 14, "YDY", "Weather"),
+        Row("e", 15, "ee", "eee"),
+        Row("f", 16, "ff", "fff"),
+        Row("g", 17, "YTY", "Hello"),
+        Row("h", 18, "hh", "hhh"),
+        Row("i", 19, "ii", "iii"),
+        Row("j", 20, "jj", "jjj"),
+        Row("a", 21, "MGM", "Disco"),
+        Row("b", 22, "RGK", "Music"),
+        Row("c", 23, "cc", "ccc"),
+        Row("d", 24, "YDY", "Weather"),
+        Row("e", 25, "ee", "eee"),
+        Row("f", 26, "ff", "fff"),
+        Row("g", 27, "YTY", "Hello"),
+        Row("h", 28, "hh", "hhh"),
+        Row("i", 29, "ii", "iii"),
+        Row("j", 30, "jj", "jjj"),
+        Row("a", 31, "MGM", "Disco"),
+        Row("b", 32, "RGK", "Music"),
+        Row("c", 33, "cc", "ccc"),
+        Row("d", 34, "YDY", "Weather"),
+        Row("e", 35, "ee", "eee"),
+        Row("f", 36, "ff", "fff"),
+        Row("g", 37, "YTY", "Hello"),
+        Row("h", 38, "hh", "hhh"),
+        Row("i", 39, "ii", "iii"),
+        Row("j", 40, "jj", "jjj"))
+    )
+    sql("""drop table dest2""")
+  }
+
+
+  test("test IUD Horizontal Compaction Delete") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql("""select * from dest2""")
+    sql(
+      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
+    sql("""select * from source2""")
+    sql("""delete from dest2 where (c2 < 3) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
+    sql("""select * from dest2 order by 2""")
+    sql("""delete from dest2 where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""").show()
+    sql("""select * from dest2 order by 2""")
+    sql("""delete from dest2 where (c2 > 5 and c2 < 8) or (c2 > 15 and c2 < 18 ) or (c2 > 25 and c2 < 28) or (c2 > 35 and c2 < 38)""").show()
+    sql("""clean files for table dest2""")
+    checkAnswer(
+      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
+      Seq(Row("c", 3, "cc", "ccc"),
+        Row("e", 5, "ee", "eee"),
+        Row("h", 8, "hh", "hhh"),
+        Row("i", 9, "ii", "iii"),
+        Row("j", 10, "jj", "jjj"),
+        Row("c", 13, "cc", "ccc"),
+        Row("e", 15, "ee", "eee"),
+        Row("h", 18, "hh", "hhh"),
+        Row("i", 19, "ii", "iii"),
+        Row("j", 20, "jj", "jjj"),
+        Row("c", 23, "cc", "ccc"),
+        Row("e", 25, "ee", "eee"),
+        Row("h", 28, "hh", "hhh"),
+        Row("i", 29, "ii", "iii"),
+        Row("j", 30, "jj", "jjj"),
+        Row("c", 33, "cc", "ccc"),
+        Row("e", 35, "ee", "eee"),
+        Row("h", 38, "hh", "hhh"),
+        Row("i", 39, "ii", "iii"),
+        Row("j", 40, "jj", "jjj"))
+    )
+    sql("""drop table dest2""")
+  }
+
+  test("test IUD Horizontal Compaction Multiple Update Vertical Compaction and Clean") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql(
+      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""").show()
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""").show()
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""").show()
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""").show()
+    sql("""alter table dest2 compact 'major'""")
+    sql("""clean files for table dest2""")
+    checkAnswer(
+      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
+      Seq(Row("a", 1, "a", "10"),
+        Row("b", 2, "b", "8"),
+        Row("c", 3, "cc", "ccc"),
+        Row("d", 4, "d", "9"),
+        Row("e", 5, "ee", "eee"),
+        Row("f", 6, "ff", "fff"),
+        Row("g", 7, "g", "12"),
+        Row("h", 8, "hh", "hhh"),
+        Row("i", 9, "ii", "iii"),
+        Row("j", 10, "jj", "jjj"),
+        Row("a", 11, "a", "10"),
+        Row("b", 12, "b", "8"),
+        Row("c", 13, "cc", "ccc"),
+        Row("d", 14, "d", "9"),
+        Row("e", 15, "ee", "eee"),
+        Row("f", 16, "ff", "fff"),
+        Row("g", 17, "g", "12"),
+        Row("h", 18, "hh", "hhh"),
+        Row("i", 19, "ii", "iii"),
+        Row("j", 20, "jj", "jjj"),
+        Row("a", 21, "a", "10"),
+        Row("b", 22, "b", "8"),
+        Row("c", 23, "cc", "ccc"),
+        Row("d", 24, "d", "9"),
+        Row("e", 25, "ee", "eee"),
+        Row("f", 26, "ff", "fff"),
+        Row("g", 27, "g", "12"),
+        Row("h", 28, "hh", "hhh"),
+        Row("i", 29, "ii", "iii"),
+        Row("j", 30, "jj", "jjj"),
+        Row("a", 31, "a", "10"),
+        Row("b", 32, "b", "8"),
+        Row("c", 33, "cc", "ccc"),
+        Row("d", 34, "d", "9"),
+        Row("e", 35, "ee", "eee"),
+        Row("f", 36, "ff", "fff"),
+        Row("g", 37, "g", "12"),
+        Row("h", 38, "hh", "hhh"),
+        Row("i", 39, "ii", "iii"),
+        Row("j", 40, "jj", "jjj"))
+    )
+    sql("""drop table dest2""")
+    sql("""drop table source2""")
+    sql("""drop database iud4 cascade""")
+  }
+
+  test("test IUD Horizontal Compaction Update Delete and Clean") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql(
+      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
+    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
+    sql("""delete from dest2 where (c2 < 2) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
+    sql("""delete from dest2 where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""").show()
+    sql("""delete from dest2 where (c2 > 5 and c2 < 8) or (c2 > 15 and c2 < 18 ) or (c2 > 25 and c2 < 28) or (c2 > 35 and c2 < 38)""").show()
+    sql("""clean files for table dest2""")
+    checkAnswer(
+      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
+      Seq(Row("b", 2, "RGK", "Music"),
+        Row("c", 3, "cc", "ccc"),
+        Row("e", 5, "ee", "eee"),
+        Row("h", 8, "hh", "hhh"),
+        Row("i", 9, "ii", "iii"),
+        Row("j", 10, "jj", "jjj"),
+        Row("c", 13, "cc", "ccc"),
+        Row("e", 15, "ee", "eee"),
+        Row("h", 18, "hh", "hhh"),
+        Row("i", 19, "ii", "iii"),
+        Row("j", 20, "jj", "jjj"),
+        Row("c", 23, "cc", "ccc"),
+        Row("e", 25, "ee", "eee"),
+        Row("h", 28, "hh", "hhh"),
+        Row("i", 29, "ii", "iii"),
+        Row("j", 30, "jj", "jjj"),
+        Row("c", 33, "cc", "ccc"),
+        Row("e", 35, "ee", "eee"),
+        Row("h", 38, "hh", "hhh"),
+        Row("i", 39, "ii", "iii"),
+        Row("j", 40, "jj", "jjj"))
+    )
+    sql("""drop table dest2""")
+  }
+
+  test("test IUD Horizontal Compaction Check Column Cardinality") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table T_Carbn01(Active_status String,Item_type_cd INT,Qty_day_avg INT,Qty_total INT,Sell_price BIGINT,Sell_pricep DOUBLE,Discount_price DOUBLE,Profit DECIMAL(3,2),Item_code String,Item_name String,Outlet_name String,Update_time TIMESTAMP,Create_date String)STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/T_Hive1.csv' INTO table t_carbn01 options ('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE','DELIMITER'=',', 'QUOTECHAR'='\', 'FILEHEADER'='Active_status,Item_type_cd,Qty_day_avg,Qty_total,Sell_price,Sell_pricep,Discount_price,Profit,Item_code,Item_name,Outlet_name,Update_time,Create_date')""")
+    sql("""update t_carbn01 set (item_code) = ('Orange') where item_type_cd = 14""").show()
+    sql("""update t_carbn01 set (item_code) = ('Banana') where item_type_cd = 2""").show()
+    sql("""delete from t_carbn01 where item_code in ('RE3423ee','Orange','Banana')""").show()
+    checkAnswer(
+      sql("""select item_code from t_carbn01 where item_code not in ('RE3423ee','Orange','Banana')"""),
+      Seq(Row("SAD423ee"),
+        Row("DE3423ee"),
+        Row("SE3423ee"),
+        Row("SE3423ee"),
+        Row("SE3423ee"),
+        Row("SE3423ee"))
+    )
+    sql("""drop table t_carbn01""")
+  }
+
+
+  test("test IUD Horizontal Compaction Segment Delete Test Case") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql(
+      """delete from dest2 where (c2 < 3) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
+    sql("""DELETE SEGMENT 0 FROM TABLE dest2""")
+    sql("""clean files for table dest2""")
+    sql(
+      """update dest2 set (c5) = ('8RAM size') where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""")
+      .show()
+    checkAnswer(
+      sql("""select count(*) from dest2"""),
+      Seq(Row(24))
+    )
+    sql("""drop table dest2""")
+  }
+
+  test("test case full table delete") {
+    sql("""drop database if exists iud4 cascade""")
+    sql("""create database iud4""")
+    sql("""use iud4""")
+    sql(
+      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
+    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
+    sql("""delete from dest2 where c2 < 41""").show()
+    sql("""alter table dest2 compact 'major'""")
+    checkAnswer(
+      sql("""select count(*) from dest2"""),
+      Seq(Row(0))
+    )
+    sql("""drop table dest2""")
+  }
+
+
+  override def afterAll {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
+    sql("use default")
+    sql("drop database if exists iud4 cascade")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index 0ad700b..25fe91b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -41,353 +41,357 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "false")
   }
 
 
-//  test("test update operation with 0 rows updation.") {
-//    sql("""drop table if exists iud.zerorows""").show
-//    sql("""create table iud.zerorows (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.zerorows""")
-//    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
-//    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'xxx'""").show()
-//     checkAnswer(
-//      sql("""select c1,c2,c3,c5 from iud.zerorows"""),
-//      Seq(Row("a",2,"aa","aaa"),Row("b",2,"bb","bbb"),Row("c",3,"cc","ccc"),Row("d",4,"dd","ddd"),Row("e",5,"ee","eee"))
-//    )
-//    sql("""drop table iud.zerorows""").show
-//
-//
-//  }
+  test("test update operation with 0 rows updation.") {
+    sql("""drop table if exists iud.zerorows""").show
+    sql("""create table iud.zerorows (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.zerorows""")
+    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
+    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'xxx'""").show()
+    checkAnswer(
+      sql("""select c1,c2,c3,c5 from iud.zerorows"""),
+      Seq(Row("a",2,"aa","aaa"),Row("b",2,"bb","bbb"),Row("c",3,"cc","ccc"),Row("d",4,"dd","ddd"),Row("e",5,"ee","eee"))
+    )
+    sql("""drop table iud.zerorows""").show
+
+
+  }
 
 
   test("update carbon table[select from source table with where and exist]") {
-      sql("""drop table if exists iud.dest11""").show
-      sql("""create table iud.dest11 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest11""")
-      sql("""update iud.dest11 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-      checkAnswer(
-        sql("""select c3,c5 from iud.dest11"""),
-        Seq(Row("cc","ccc"), Row("dd","ddd"),Row("ee","eee"), Row("MGM","Disco"),Row("RGK","Music"))
-      )
-      sql("""drop table iud.dest11""").show
-   }
-
-//   test("update carbon table[using destination table columns with where and exist]") {
-//    sql("""drop table if exists iud.dest22""")
-//    sql("""create table iud.dest22 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest22""")
-//    checkAnswer(
-//      sql("""select c2 from iud.dest22 where c1='a'"""),
-//      Seq(Row(1))
-//    )
-//    sql("""update dest22 d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
-//    checkAnswer(
-//      sql("""select c2 from iud.dest22 where c1='a'"""),
-//      Seq(Row(2))
-//    )
-//    sql("""drop table iud.dest22""")
-//   }
-
-//   test("update carbon table without alias in set columns") {
-//      sql("""drop table iud.dest33""")
-//      sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-//      sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-//      checkAnswer(
-//        sql("""select c3,c5 from iud.dest33 where c1='a'"""),
-//        Seq(Row("MGM","Disco"))
-//      )
-//      sql("""drop table iud.dest33""")
-//  }
-//
-//  test("update carbon table without alias in set columns with mulitple loads") {
-//    sql("""drop table iud.dest33""")
-//    sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-//    sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-//    checkAnswer(
-//      sql("""select c3,c5 from iud.dest33 where c1='a'"""),
-//      Seq(Row("MGM","Disco"),Row("MGM","Disco"))
-//    )
-//    sql("""drop table iud.dest33""")
-//  }
-//
-//   test("update carbon table without alias in set three columns") {
-//     sql("""drop table iud.dest44""")
-//     sql("""create table iud.dest44 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest44""")
-//     sql("""update iud.dest44 d set (c1,c3,c5 ) = (select s.c11, s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-//     checkAnswer(
-//       sql("""select c1,c3,c5 from iud.dest44 where c1='a'"""),
-//       Seq(Row("a","MGM","Disco"))
-//     )
-//     sql("""drop table iud.dest44""")
-//   }
-//
-//   test("update carbon table[single column select from source with where and exist]") {
-//      sql("""drop table iud.dest55""")
-//      sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
-//     sql("""update iud.dest55 d set (c3)  = (select s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-//      checkAnswer(
-//        sql("""select c1,c3 from iud.dest55 """),
-//        Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
-//      )
-//      sql("""drop table iud.dest55""")
-//   }
-//
-//  test("update carbon table[single column SELECT from source with where and exist]") {
-//    sql("""drop table iud.dest55""")
-//    sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
-//    sql("""update iud.dest55 d set (c3)  = (SELECT s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-//    checkAnswer(
-//      sql("""select c1,c3 from iud.dest55 """),
-//      Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
-//    )
-//    sql("""drop table iud.dest55""")
-//  }
-//
-//   test("update carbon table[using destination table columns without where clause]") {
-//     sql("""drop table iud.dest66""")
-//     sql("""create table iud.dest66 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest66""")
-//     sql("""update iud.dest66 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest66 """),
-//       Seq(Row(2,"aaaz"),Row(3,"bbbz"),Row(4,"cccz"),Row(5,"dddz"),Row(6,"eeez"))
-//     )
-//     sql("""drop table iud.dest66""")
-//   }
-//
-//   test("update carbon table[using destination table columns with where clause]") {
-//       sql("""drop table iud.dest77""")
-//       sql("""create table iud.dest77 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//       sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest77""")
-//       sql("""update iud.dest77 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z")) where d.c3 = 'dd'""").show()
-//       checkAnswer(
-//         sql("""select c2,c5 from iud.dest77 where c3 = 'dd'"""),
-//         Seq(Row(5,"dddz"))
-//       )
-//       sql("""drop table iud.dest77""")
-//   }
-//
-//   test("update carbon table[using destination table( no alias) columns without where clause]") {
-//     sql("""drop table iud.dest88""")
-//     sql("""create table iud.dest88 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest88""")
-//     sql("""update iud.dest88  set (c2, c5 ) = (c2 + 1, concat(c5 , "y" ))""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest88 """),
-//       Seq(Row(2,"aaay"),Row(3,"bbby"),Row(4,"cccy"),Row(5,"dddy"),Row(6,"eeey"))
-//     )
-//     sql("""drop table iud.dest88""")
-//   }
-//
-//   test("update carbon table[using destination table columns with hard coded value ]") {
-//     sql("""drop table iud.dest99""")
-//     sql("""create table iud.dest99 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest99""")
-//     sql("""update iud.dest99 d set (c2, c5 ) = (c2 + 1, "xyx")""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest99 """),
-//       Seq(Row(2,"xyx"),Row(3,"xyx"),Row(4,"xyx"),Row(5,"xyx"),Row(6,"xyx"))
-//     )
-//     sql("""drop table iud.dest99""")
-//   }
-//
-//   test("update carbon tableusing destination table columns with hard coded value and where condition]") {
-//     sql("""drop table iud.dest110""")
-//     sql("""create table iud.dest110 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest110""")
-//     sql("""update iud.dest110 d set (c2, c5 ) = (c2 + 1, "xyx") where d.c1 = 'e'""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest110 where c1 = 'e' """),
-//       Seq(Row(6,"xyx"))
-//     )
-//     sql("""drop table iud.dest110""")
-//   }
-//
-//   test("update carbon table[using source  table columns with where and exist and no destination table condition]") {
-//     sql("""drop table iud.dest120""")
-//     sql("""create table iud.dest120 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest120""")
-//     sql("""update iud.dest120 d  set (c3, c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11)""").show()
-//     checkAnswer(
-//       sql("""select c3,c5 from iud.dest120 """),
-//       Seq(Row("MGM","Disco"),Row("RGK","Music"),Row("cc","ccc"),Row("dd","ddd"),Row("ee","eee"))
-//     )
-//     sql("""drop table iud.dest120""")
-//   }
-//
-//   test("update carbon table[using destination table where and exist]") {
-//     sql("""drop table iud.dest130""")
-//     sql("""create table iud.dest130 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest130""")
-//     sql("""update iud.dest130 dd  set (c2, c5 ) = (c2 + 1, "xyx")  where dd.c1 = 'a'""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest130 where c1 = 'a' """),
-//       Seq(Row(2,"xyx"))
-//     )
-//     sql("""drop table iud.dest130""")
-//   }
-//
-//   test("update carbon table[using destination table (concat) where and exist]") {
-//     sql("""drop table iud.dest140""")
-//     sql("""create table iud.dest140 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest140""")
-//     sql("""update iud.dest140 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))  where d.c1 = 'a'""").show()
-//     checkAnswer(
-//       sql("""select c2,c5 from iud.dest140 where c1 = 'a'"""),
-//       Seq(Row(2,"aaaz"))
-//     )
-//     sql("""drop table iud.dest140""")
-//   }
-//
-//   test("update carbon table[using destination table (concat) with  where") {
-//     sql("""drop table iud.dest150""")
-//     sql("""create table iud.dest150 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest150""")
-//     sql("""update iud.dest150 d set (c5) = (concat(c5 , "z"))  where d.c1 = 'b'""").show()
-//     checkAnswer(
-//       sql("""select c5 from iud.dest150 where c1 = 'b' """),
-//       Seq(Row("bbbz"))
-//     )
-//     sql("""drop table iud.dest150""")
-//   }
-//
-//  test("update table with data for datatype mismatch with column ") {
-//    sql("""update iud.update_01 set (imei) = ('skt') where level = 'aaa'""")
-//    checkAnswer(
-//      sql("""select * from iud.update_01 where imei = 'skt'"""),
-//      Seq()
-//    )
-//  }
-//
-//   test("update carbon table-error[more columns in source table not allowed") {
-//     val exception = intercept[Exception] {
-//       sql("""update iud.dest d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"), "abc")""").show()
-//     }
-//     assertResult("Number of source and destination columns are not matching")(exception.getMessage)
-//   }
-//
-//   test("update carbon table-error[no set columns") {
-//     intercept[Exception] {
-//       sql("""update iud.dest d set () = ()""").show()
-//     }
-//   }
-//
-//   test("update carbon table-error[no set columns with updated column") {
-//     intercept[Exception] {
-//       sql("""update iud.dest d set  = (c1+1)""").show()
-//     }
-//   }
-//   test("update carbon table-error[one set column with two updated column") {
-//     intercept[Exception] {
-//       sql("""update iud.dest  set c2 = (c2 + 1, concat(c5 , "z") )""").show()
-//     }
-//   }
-//
-// test("""update carbon [special characters  in value- test parsing logic ]""") {
-//    sql("""drop table iud.dest160""")
-//    sql("""create table iud.dest160 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest160""")
-//    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
-//    sql("""update iud.dest160 set(c1) =  ('abd$asjdh$adasj$l;sdf$*)$*)(&^')""").show()
-//    sql("""update iud.dest160 set(c1) =("\\")""").show()
-//    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
-//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'a\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""update iud.dest160 d set (c3,c5)      =     (select s.c33,'a\\a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""update iud.dest160 d set (c3,c5) =(select s.c33,'a\'a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a\'a\"' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    sql("""drop table iud.dest160""")
-//  }
-//
-//  test("""update carbon [sub query, between and existing in outer condition.(Customer query ) ]""") {
-//    sql("""drop table iud.dest170""")
-//    sql("""create table iud.dest170 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest170""")
-//    sql("""update iud.dest170 d set (c3)=(select s.c33 from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-//    checkAnswer(
-//      sql("""select c3 from  iud.dest170 as d where d.c2 between 1 and 3"""),
-//      Seq(Row("MGM"), Row("RGK"), Row("cc"))
-//    )
-//    sql("""drop table iud.dest170""")
-//  }
-//
-//  test("""update carbon [self join select query ]""") {
-//    sql("""drop table iud.dest171""")
-//    sql("""create table iud.dest171 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest171""")
-//    sql("""update iud.dest171 d set (c3)=(select concat(s.c3 , "z") from iud.dest171 s where d.c2 = s.c2)""").show
-//    sql("""drop table iud.dest172""")
-//    sql("""create table iud.dest172 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest172""")
-//    sql("""update iud.dest172 d set (c3)=( concat(c3 , "z"))""").show
-//    checkAnswer(
-//      sql("""select c3 from  iud.dest171"""),
-//      sql("""select c3 from  iud.dest172""")
-//    )
-//    sql("""drop table iud.dest171""")
-//    sql("""drop table iud.dest172""")
-//  }
-//
-//  test("update carbon table-error[closing bracket missed") {
-//    intercept[Exception] {
-//      sql("""update iud.dest d set (c2) = (194""").show()
-//    }
-//  }
-//
-//  test("update carbon table-error[starting bracket missed") {
-//    intercept[Exception] {
-//      sql("""update iud.dest d set (c2) = 194)""").show()
-//    }
-//  }
-//
-//  test("update carbon table-error[missing starting and closing bracket") {
-//    intercept[Exception] {
-//      sql("""update iud.dest d set (c2) = 194""").show()
-//    }
-//  }
-//
-//  test("test create table with column name as tupleID"){
-//    intercept[Exception] {
-//      sql("CREATE table carbontable (empno int, tupleID String, " +
-//          "designation String, doj Timestamp, workgroupcategory int, " +
-//          "workgroupcategoryname String, deptno int, deptname String, projectcode int, " +
-//          "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
-//          "utilization int,salary int) STORED BY 'org.apache.carbondata.format' " +
-//          "TBLPROPERTIES('DICTIONARY_INCLUDE'='empno,workgroupcategory,deptno,projectcode'," +
-//          "'DICTIONARY_EXCLUDE'='empname')")
-//    }
-//  }
-//
-//  test("Failure of update operation due to bad record with proper error message") {
-//    try {
-//      CarbonProperties.getInstance()
-//        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
-//      val errorMessage = intercept[Exception] {
-//        sql("drop table if exists update_with_bad_record")
-//        sql("create table update_with_bad_record(item int, name String) stored by 'carbondata'")
-//        sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/IUD/bad_record.csv' into table " +
-//            s"update_with_bad_record")
-//        sql("update update_with_bad_record set (item)=(3.45)").show()
-//        sql("drop table if exists update_with_bad_record")
-//      }
-//      assert(errorMessage.getMessage.contains("Data load failed due to bad record"))
-//    } finally {
-//      CarbonProperties.getInstance()
-//        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
-//    }
-//  }
+    sql("""drop table if exists iud.dest11""").show
+    sql("""create table iud.dest11 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest11""")
+    sql("""update iud.dest11 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+    checkAnswer(
+      sql("""select c3,c5 from iud.dest11"""),
+      Seq(Row("cc","ccc"), Row("dd","ddd"),Row("ee","eee"), Row("MGM","Disco"),Row("RGK","Music"))
+    )
+    sql("""drop table iud.dest11""").show
+  }
+
+  test("update carbon table[using destination table columns with where and exist]") {
+    sql("""drop table if exists iud.dest22""")
+    sql("""create table iud.dest22 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest22""")
+    checkAnswer(
+      sql("""select c2 from iud.dest22 where c1='a'"""),
+      Seq(Row(1))
+    )
+    sql("""update dest22 d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c2 from iud.dest22 where c1='a'"""),
+      Seq(Row(2))
+    )
+    sql("""drop table if exists iud.dest22""")
+  }
+
+  test("update carbon table without alias in set columns") {
+    sql("""drop table if exists iud.dest33""")
+    sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+    sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c3,c5 from iud.dest33 where c1='a'"""),
+      Seq(Row("MGM","Disco"))
+    )
+    sql("""drop table if exists iud.dest33""")
+  }
+
+  test("update carbon table without alias in set columns with mulitple loads") {
+    sql("""drop table if exists iud.dest33""")
+    sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+    sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c3,c5 from iud.dest33 where c1='a'"""),
+      Seq(Row("MGM","Disco"),Row("MGM","Disco"))
+    )
+    sql("""drop table if exists iud.dest33""")
+  }
+
+  test("update carbon table without alias in set three columns") {
+    sql("""drop table if exists iud.dest44""")
+    sql("""create table iud.dest44 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest44""")
+    sql("""update iud.dest44 d set (c1,c3,c5 ) = (select s.c11, s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c1,c3,c5 from iud.dest44 where c1='a'"""),
+      Seq(Row("a","MGM","Disco"))
+    )
+    sql("""drop table if exists iud.dest44""")
+  }
+
+  test("update carbon table[single column select from source with where and exist]") {
+    sql("""drop table if exists iud.dest55""")
+    sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
+    sql("""update iud.dest55 d set (c3)  = (select s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+    checkAnswer(
+      sql("""select c1,c3 from iud.dest55 """),
+      Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
+    )
+    sql("""drop table if exists iud.dest55""")
+  }
+
+  test("update carbon table[single column SELECT from source with where and exist]") {
+    sql("""drop table if exists iud.dest55""")
+    sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
+    sql("""update iud.dest55 d set (c3)  = (SELECT s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+    checkAnswer(
+      sql("""select c1,c3 from iud.dest55 """),
+      Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
+    )
+    sql("""drop table if exists iud.dest55""")
+  }
+
+  test("update carbon table[using destination table columns without where clause]") {
+    sql("""drop table if exists iud.dest66""")
+    sql("""create table iud.dest66 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest66""")
+    sql("""update iud.dest66 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest66 """),
+      Seq(Row(2,"aaaz"),Row(3,"bbbz"),Row(4,"cccz"),Row(5,"dddz"),Row(6,"eeez"))
+    )
+    sql("""drop table if exists iud.dest66""")
+  }
+
+  test("update carbon table[using destination table columns with where clause]") {
+    sql("""drop table if exists iud.dest77""")
+    sql("""create table iud.dest77 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest77""")
+    sql("""update iud.dest77 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z")) where d.c3 = 'dd'""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest77 where c3 = 'dd'"""),
+      Seq(Row(5,"dddz"))
+    )
+    sql("""drop table if exists iud.dest77""")
+  }
+
+  test("update carbon table[using destination table( no alias) columns without where clause]") {
+    sql("""drop table if exists iud.dest88""")
+    sql("""create table iud.dest88 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest88""")
+    sql("""update iud.dest88  set (c2, c5 ) = (c2 + 1, concat(c5 , "y" ))""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest88 """),
+      Seq(Row(2,"aaay"),Row(3,"bbby"),Row(4,"cccy"),Row(5,"dddy"),Row(6,"eeey"))
+    )
+    sql("""drop table if exists iud.dest88""")
+  }
+
+  test("update carbon table[using destination table columns with hard coded value ]") {
+    sql("""drop table if exists iud.dest99""")
+    sql("""create table iud.dest99 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest99""")
+    sql("""update iud.dest99 d set (c2, c5 ) = (c2 + 1, "xyx")""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest99 """),
+      Seq(Row(2,"xyx"),Row(3,"xyx"),Row(4,"xyx"),Row(5,"xyx"),Row(6,"xyx"))
+    )
+    sql("""drop table if exists iud.dest99""")
+  }
+
+  test("update carbon tableusing destination table columns with hard coded value and where condition]") {
+    sql("""drop table if exists iud.dest110""")
+    sql("""create table iud.dest110 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest110""")
+    sql("""update iud.dest110 d set (c2, c5 ) = (c2 + 1, "xyx") where d.c1 = 'e'""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest110 where c1 = 'e' """),
+      Seq(Row(6,"xyx"))
+    )
+    sql("""drop table iud.dest110""")
+  }
+
+  test("update carbon table[using source  table columns with where and exist and no destination table condition]") {
+    sql("""drop table if exists iud.dest120""")
+    sql("""create table iud.dest120 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest120""")
+    sql("""update iud.dest120 d  set (c3, c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11)""").show()
+    checkAnswer(
+      sql("""select c3,c5 from iud.dest120 """),
+      Seq(Row("MGM","Disco"),Row("RGK","Music"),Row("cc","ccc"),Row("dd","ddd"),Row("ee","eee"))
+    )
+    sql("""drop table iud.dest120""")
+  }
+
+  test("update carbon table[using destination table where and exist]") {
+    sql("""drop table if exists iud.dest130""")
+    sql("""create table iud.dest130 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest130""")
+    sql("""update iud.dest130 dd  set (c2, c5 ) = (c2 + 1, "xyx")  where dd.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest130 where c1 = 'a' """),
+      Seq(Row(2,"xyx"))
+    )
+    sql("""drop table iud.dest130""")
+  }
+
+  test("update carbon table[using destination table (concat) where and exist]") {
+    sql("""drop table if exists iud.dest140""")
+    sql("""create table iud.dest140 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest140""")
+    sql("""update iud.dest140 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))  where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c2,c5 from iud.dest140 where c1 = 'a'"""),
+      Seq(Row(2,"aaaz"))
+    )
+    sql("""drop table iud.dest140""")
+  }
+
+  test("update carbon table[using destination table (concat) with  where") {
+    sql("""drop table if exists iud.dest150""")
+    sql("""create table iud.dest150 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest150""")
+    sql("""update iud.dest150 d set (c5) = (concat(c5 , "z"))  where d.c1 = 'b'""").show()
+    checkAnswer(
+      sql("""select c5 from iud.dest150 where c1 = 'b' """),
+      Seq(Row("bbbz"))
+    )
+    sql("""drop table iud.dest150""")
+  }
+
+  test("update table with data for datatype mismatch with column ") {
+    sql("""update iud.update_01 set (imei) = ('skt') where level = 'aaa'""")
+    checkAnswer(
+      sql("""select * from iud.update_01 where imei = 'skt'"""),
+      Seq()
+    )
+  }
+
+  test("update carbon table-error[more columns in source table not allowed") {
+    val exception = intercept[Exception] {
+      sql("""update iud.dest d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"), "abc")""").show()
+    }
+    assertResult("Number of source and destination columns are not matching")(exception.getMessage)
+  }
+
+  test("update carbon table-error[no set columns") {
+    intercept[Exception] {
+      sql("""update iud.dest d set () = ()""").show()
+    }
+  }
+
+  test("update carbon table-error[no set columns with updated column") {
+    intercept[Exception] {
+      sql("""update iud.dest d set  = (c1+1)""").show()
+    }
+  }
+  test("update carbon table-error[one set column with two updated column") {
+    intercept[Exception] {
+      sql("""update iud.dest  set c2 = (c2 + 1, concat(c5 , "z") )""").show()
+    }
+  }
+
+  test("""update carbon [special characters  in value- test parsing logic ]""") {
+    sql("""drop table if exists iud.dest160""")
+    sql("""create table iud.dest160 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest160""")
+    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
+    sql("""update iud.dest160 set(c1) =  ('abd$asjdh$adasj$l;sdf$*)$*)(&^')""").show()
+    sql("""update iud.dest160 set(c1) =("\\")""").show()
+    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
+    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'a\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""update iud.dest160 d set (c3,c5)      =     (select s.c33,'a\\a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""update iud.dest160 d set (c3,c5) =(select s.c33,'a\'a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a\'a\"' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    sql("""drop table iud.dest160""")
+  }
+
+  test("""update carbon [sub query, between and existing in outer condition.(Customer query ) ]""") {
+    sql("""drop table if exists iud.dest170""")
+    sql("""create table iud.dest170 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest170""")
+    sql("""update iud.dest170 d set (c3)=(select s.c33 from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+    checkAnswer(
+      sql("""select c3 from  iud.dest170 as d where d.c2 between 1 and 3"""),
+      Seq(Row("MGM"), Row("RGK"), Row("cc"))
+    )
+    sql("""drop table iud.dest170""")
+  }
+
+  test("""update carbon [self join select query ]""") {
+    sql("""drop table if exists iud.dest171""")
+    sql("""create table iud.dest171 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest171""")
+    sql("""update iud.dest171 d set (c3)=(select concat(s.c3 , "z") from iud.dest171 s where d.c2 = s.c2)""").show
+    sql("""drop table if exists iud.dest172""")
+    sql("""create table iud.dest172 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest172""")
+    sql("""update iud.dest172 d set (c3)=( concat(c3 , "z"))""").show
+    checkAnswer(
+      sql("""select c3 from  iud.dest171"""),
+      sql("""select c3 from  iud.dest172""")
+    )
+    sql("""drop table iud.dest171""")
+    sql("""drop table iud.dest172""")
+  }
+
+  test("update carbon table-error[closing bracket missed") {
+    intercept[Exception] {
+      sql("""update iud.dest d set (c2) = (194""").show()
+    }
+  }
+
+  test("update carbon table-error[starting bracket missed") {
+    intercept[Exception] {
+      sql("""update iud.dest d set (c2) = 194)""").show()
+    }
+  }
+
+  test("update carbon table-error[missing starting and closing bracket") {
+    intercept[Exception] {
+      sql("""update iud.dest d set (c2) = 194""").show()
+    }
+  }
+
+  test("test create table with column name as tupleID"){
+    intercept[Exception] {
+      sql("CREATE table carbontable (empno int, tupleID String, " +
+          "designation String, doj Timestamp, workgroupcategory int, " +
+          "workgroupcategoryname String, deptno int, deptname String, projectcode int, " +
+          "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
+          "utilization int,salary int) STORED BY 'org.apache.carbondata.format' " +
+          "TBLPROPERTIES('DICTIONARY_INCLUDE'='empno,workgroupcategory,deptno,projectcode'," +
+          "'DICTIONARY_EXCLUDE'='empname')")
+    }
+  }
+
+  test("Failure of update operation due to bad record with proper error message") {
+    try {
+      CarbonProperties.getInstance()
+        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
+      val errorMessage = intercept[Exception] {
+        sql("drop table if exists update_with_bad_record")
+        sql("create table update_with_bad_record(item int, name String) stored by 'carbondata'")
+        sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/IUD/bad_record.csv' into table " +
+            s"update_with_bad_record")
+        sql("update update_with_bad_record set (item)=(3.45)").show()
+        sql("drop table if exists update_with_bad_record")
+      }
+      assert(errorMessage.getMessage.contains("Data load failed due to bad record"))
+    } finally {
+      CarbonProperties.getInstance()
+        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
+    }
+  }
 
   override def afterAll {
-//    sql("use default")
-//    sql("drop database  if exists iud cascade")
+    sql("use default")
+    sql("drop database  if exists iud cascade")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
deleted file mode 100644
index 93da343..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.iud
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
-  override def beforeAll {
-
-    sql("use default")
-    sql("drop database  if exists iud_db cascade")
-    sql("create database  iud_db")
-
-    sql("""create table iud_db.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv' INTO table iud_db.source2""")
-    sql("use iud_db")
-  }
-  test("delete data from carbon table with alias [where clause ]") {
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from iud_db.dest d where d.c1 = 'a'""").show
-    checkAnswer(
-      sql("""select c2 from iud_db.dest"""),
-      Seq(Row(2), Row(3),Row(4), Row(5))
-    )
-  }
-  test("delete data from  carbon table[where clause ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from dest where c2 = 2""").show
-    checkAnswer(
-      sql("""select c1 from dest"""),
-      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
-    )
-  }
-  test("delete data from  carbon table[where IN  ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from dest where c1 IN ('d', 'e')""").show
-    checkAnswer(
-      sql("""select c1 from dest"""),
-      Seq(Row("a"), Row("b"),Row("c"))
-    )
-  }
-
-  test("delete data from  carbon table[with alias No where clause]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from iud_db.dest a""").show
-    checkAnswer(
-      sql("""select c1 from iud_db.dest"""),
-      Seq()
-    )
-  }
-  test("delete data from  carbon table[No alias No where clause]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from dest""").show()
-    checkAnswer(
-      sql("""select c1 from dest"""),
-      Seq()
-    )
-  }
-
-  test("delete data from  carbon table[ JOIN with another table ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql(""" DELETE FROM dest t1 INNER JOIN source2 t2 ON t1.c1 = t2.c11""").show(truncate = false)
-    checkAnswer(
-      sql("""select c1 from iud_db.dest"""),
-      Seq(Row("c"), Row("d"), Row("e"))
-    )
-  }
-
-//  test("delete data from  carbon table[where IN (sub query) ]") {
-//    sql("""drop table if exists iud_db.dest""")
-//    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
-//    sql("""LOAD DATA LOCAL INPATH './src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-//    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2)""").show(truncate = false)
-//    checkAnswer(
-//      sql("""select c1 from iud_db.dest"""),
-//      Seq(Row("c"), Row("d"), Row("e"))
-//    )
-//  }
-//  test("delete data from  carbon table[where IN (sub query with where clause) ]") {
-//    sql("""drop table if exists iud_db.dest""")
-//    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
-//    sql("""LOAD DATA LOCAL INPATH './src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
-//    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2 where c11 = 'b')""").show()
-//    checkAnswer(
-//      sql("""select c1 from iud_db.dest"""),
-//      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
-//    )
-//  }
-  test("delete data from  carbon table[where numeric condition  ]") {
-    sql("""drop table if exists iud_db.dest""")
-    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud_db.dest""")
-    sql("""delete from  iud_db.dest where c2 >= 4""").show()
-    checkAnswer(
-      sql("""select count(*) from iud_db.dest"""),
-      Seq(Row(3))
-    )
-  }
-  override def afterAll {
-    sql("use default")
-    sql("drop database  if exists iud_db cascade")
-  }
-}
\ No newline at end of file


[33/42] carbondata git commit: updated timeout message

Posted by ra...@apache.org.
updated timeout message


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/42ad4ab2
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/42ad4ab2
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/42ad4ab2

Branch: refs/heads/branch-1.1
Commit: 42ad4ab22c8dfc0fc7cd044470ad47e6d436fc11
Parents: 809d880
Author: kunal642 <ku...@knoldus.in>
Authored: Thu May 25 16:06:39 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:26:07 2017 +0530

----------------------------------------------------------------------
 .../client/DictionaryClientHandler.java         |  9 ++++++++-
 .../dictionary/client/DictionaryClientTest.java | 20 ++++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/42ad4ab2/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClientHandler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClientHandler.java b/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClientHandler.java
index 1ed8b36..9922523 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClientHandler.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/client/DictionaryClientHandler.java
@@ -91,7 +91,14 @@ public class DictionaryClientHandler extends ChannelInboundHandlerAdapter {
     try {
       dictionaryMessage = responseMsgQueue.poll(100, TimeUnit.SECONDS);
       if (dictionaryMessage == null) {
-        throw new RuntimeException("Request timed out for key : " + key);
+        StringBuilder message = new StringBuilder();
+        message.append("DictionaryMessage { ColumnName: ")
+            .append(key.getColumnName())
+            .append(", DictionaryValue: ")
+            .append(key.getDictionaryValue())
+            .append(", type: ")
+            .append(key.getType());
+        throw new RuntimeException("Request timed out for key : " + message);
       }
       return dictionaryMessage;
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/42ad4ab2/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java b/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
index a96e364..60d3c26 100644
--- a/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/dictionary/client/DictionaryClientTest.java
@@ -19,6 +19,8 @@ package org.apache.carbondata.core.dictionary.client;
 
 import java.io.File;
 import java.util.Arrays;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.dictionary.generator.key.DictionaryMessage;
@@ -33,6 +35,8 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.util.CarbonProperties;
 
+import mockit.Mock;
+import mockit.MockUp;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -160,6 +164,22 @@ public class DictionaryClientTest {
     // Shutdown the server
   }
 
+  @Test public void testToCheckIfCorrectTimeOutExceptionMessageIsThrown() {
+    new MockUp<LinkedBlockingQueue<DictionaryMessage>>() {
+      @SuppressWarnings("unused")
+      @Mock
+      DictionaryMessage poll(long timeout, TimeUnit unit) throws InterruptedException {
+        return null;
+      }
+    };
+    try {
+      testClient();
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertFalse(e.getMessage().contains("data"));
+    }
+  }
+
   @After public void tearDown() {
     // Cleanup created files
     CarbonMetadata.getInstance().removeTable(tableInfo.getTableUniqueName());


[13/42] carbondata git commit: Corrected test case

Posted by ra...@apache.org.
Corrected test case


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/959e851a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/959e851a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/959e851a

Branch: refs/heads/branch-1.1
Commit: 959e851aa53ed3a7e7572c847e9937d9397eadd5
Parents: d734f53
Author: ravipesala <ra...@gmail.com>
Authored: Mon May 22 14:29:10 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:02 2017 +0530

----------------------------------------------------------------------
 .../spark/testsuite/dataload/TestBatchSortDataLoad.scala       | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/959e851a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
index 70007c6..43bcac8 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
@@ -86,7 +86,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(200000)))
 
-    assert(getIndexfileCount("carbon_load1") == 12, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load1") == 10, "Something wrong in batch sort")
   }
 
   test("test batch sort load by passing option to load command and compare with normal load") {
@@ -167,7 +167,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(200000)))
 
-    assert(getIndexfileCount("carbon_load3") == 12, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load3") == 10, "Something wrong in batch sort")
 
     checkAnswer(sql("select * from carbon_load3 where c1='a1' order by c1"),
       sql("select * from carbon_load2 where c1='a1' order by c1"))
@@ -188,7 +188,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(200000)))
 
-    assert(getIndexfileCount("carbon_load4") == 12, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load4") == 10, "Something wrong in batch sort")
     CarbonProperties.getInstance().
       addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
         CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)


[19/42] carbondata git commit: close dictionary server on application end

Posted by ra...@apache.org.
close dictionary server on application end


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/43e06b65
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/43e06b65
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/43e06b65

Branch: refs/heads/branch-1.1
Commit: 43e06b65a7fbeaf35dced6ece4f8014015960ba2
Parents: 50da524
Author: kunal642 <ku...@knoldus.in>
Authored: Sun May 21 23:12:59 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:58:11 2017 +0530

----------------------------------------------------------------------
 .../carbondata/core/dictionary/server/DictionaryServer.java   | 4 +---
 .../spark/sql/execution/command/carbonTableSchema.scala       | 6 ++++++
 .../spark/sql/execution/command/carbonTableSchema.scala       | 7 +++++++
 3 files changed, 14 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/43e06b65/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
index f86cd6b..84f2a0d 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/server/DictionaryServer.java
@@ -135,11 +135,9 @@ public class DictionaryServer {
    * @throws Exception
    */
   public void shutdown() throws Exception {
+    LOGGER.info("Shutting down dictionary server");
     worker.shutdownGracefully();
     boss.shutdownGracefully();
-    // Wait until all threads are terminated.
-    boss.terminationFuture().sync();
-    worker.terminationFuture().sync();
   }
 
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/43e06b65/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 494beff..7258511 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
 import scala.language.implicitConversions
 
 import org.apache.commons.lang3.StringUtils
+import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
@@ -525,6 +526,11 @@ case class LoadTable(
             val dictionaryServer = DictionaryServer
               .getInstance(dictionaryServerPort.toInt)
             carbonLoadModel.setDictionaryServerPort(dictionaryServer.getPort)
+            sqlContext.sparkContext.addSparkListener(new SparkListener() {
+              override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd) {
+                dictionaryServer.shutdown()
+              }
+            })
             Some(dictionaryServer)
           } else {
             None

http://git-wip-us.apache.org/repos/asf/carbondata/blob/43e06b65/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 09824d8..5dd6832 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
 import scala.language.implicitConversions
 
 import org.apache.commons.lang3.StringUtils
+import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
@@ -533,10 +534,16 @@ case class LoadTable(
             val dictionaryServer = DictionaryServer
               .getInstance(dictionaryServerPort.toInt)
             carbonLoadModel.setDictionaryServerPort(dictionaryServer.getPort)
+            sparkSession.sparkContext.addSparkListener(new SparkListener() {
+              override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd) {
+                dictionaryServer.shutdown()
+              }
+            })
             Some(dictionaryServer)
           } else {
             None
           }
+
           CarbonDataRDDFactory.loadCarbonData(sparkSession.sqlContext,
             carbonLoadModel,
             relation.tableMeta.storePath,
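
Note: the listener registration above ties the dictionary server's lifetime to the Spark application, so its Netty worker/boss groups are shut down even if the load path never reaches an explicit shutdown() call. A minimal standalone sketch of the same pattern, assuming only a SparkContext and some service exposing a shutdown() method; the helper name and parameters below are illustrative and not part of this commit:

    import org.apache.spark.SparkContext
    import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}

    // Hypothetical helper: runs the given cleanup action once the Spark application ends,
    // e.g. closeOnApplicationEnd(sc, () => dictionaryServer.shutdown())
    def closeOnApplicationEnd(sc: SparkContext, cleanup: () => Unit): Unit = {
      sc.addSparkListener(new SparkListener {
        override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
          // The application is already ending here, so failures should be logged
          // rather than rethrown; this sketch simply invokes the cleanup action.
          cleanup()
        }
      })
    }
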


[28/42] carbondata git commit: sessionstate hiveclient to be used for all the sql's run on hive metastore.

Posted by ra...@apache.org.
sessionstate hiveclient to be used for all the sql's run on hive metastore.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/917152a7
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/917152a7
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/917152a7

Branch: refs/heads/branch-1.1
Commit: 917152a79f0ecdb49afda952da616f80f7865793
Parents: 0a0b7b1
Author: nareshpr <pr...@gmail.com>
Authored: Mon Jun 5 15:56:25 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:17:19 2017 +0530

----------------------------------------------------------------------
 .../sql/execution/command/AlterTableCommands.scala      | 12 ++++++------
 .../scala/org/apache/spark/util/AlterTableUtil.scala    |  9 +++++----
 2 files changed, 11 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/917152a7/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
index 4ac3ea2..7969df4 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
@@ -22,7 +22,7 @@ import scala.collection.mutable.ListBuffer
 import scala.language.implicitConversions
 
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
-import org.apache.spark.sql.hive.{CarbonRelation, HiveExternalCatalog}
+import org.apache.spark.sql.hive.{CarbonRelation, CarbonSessionState}
 import org.apache.spark.util.AlterTableUtil
 
 import org.apache.carbondata.common.logging.LogServiceFactory
@@ -100,7 +100,7 @@ private[sql] case class AlterTableAddColumns(
         .updateSchemaInfo(carbonTable,
           schemaConverter.fromWrapperToExternalSchemaEvolutionEntry(schemaEvolutionEntry),
           thriftTable)(sparkSession,
-          sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog])
+          sparkSession.sessionState.asInstanceOf[CarbonSessionState])
       LOGGER.info(s"Alter table for add columns is successful for table $dbName.$tableName")
       LOGGER.audit(s"Alter table for add columns is successful for table $dbName.$tableName")
     } catch {
@@ -202,10 +202,10 @@ private[sql] case class AlterTableRenameTable(alterTableRenameModel: AlterTableR
           carbonTable.getStorePath)(sparkSession)
       CarbonEnv.getInstance(sparkSession).carbonMetastore
         .removeTableFromMetadata(oldDatabaseName, oldTableName)
-      sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
+      sparkSession.sessionState.asInstanceOf[CarbonSessionState].metadataHive
         .runSqlHive(
           s"ALTER TABLE $oldDatabaseName.$oldTableName RENAME TO $oldDatabaseName.$newTableName")
-      sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
+      sparkSession.sessionState.asInstanceOf[CarbonSessionState].metadataHive
         .runSqlHive(
           s"ALTER TABLE $oldDatabaseName.$newTableName SET SERDEPROPERTIES" +
           s"('tableName'='$newTableName', " +
@@ -339,7 +339,7 @@ private[sql] case class AlterTableDropColumns(
         .updateSchemaInfo(carbonTable,
           schemaEvolutionEntry,
           tableInfo)(sparkSession,
-          sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog])
+          sparkSession.sessionState.asInstanceOf[CarbonSessionState])
       // TODO: 1. add check for deletion of index tables
       // delete dictionary files for dictionary column and clear dictionary cache from memory
       new AlterTableDropColumnRDD(sparkSession.sparkContext,
@@ -430,7 +430,7 @@ private[sql] case class AlterTableDataTypeChange(
         .updateSchemaInfo(carbonTable,
           schemaEvolutionEntry,
           tableInfo)(sparkSession,
-          sparkSession.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog])
+          sparkSession.sessionState.asInstanceOf[CarbonSessionState])
       LOGGER.info(s"Alter table for data type change is successful for table $dbName.$tableName")
       LOGGER.audit(s"Alter table for data type change is successful for table $dbName.$tableName")
     } catch {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/917152a7/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
index d7b1422..9e402cd 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
@@ -23,7 +23,7 @@ import scala.collection.mutable.ListBuffer
 import org.apache.spark.SparkConf
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.hive.{CarbonRelation, HiveExternalCatalog}
+import org.apache.spark.sql.hive.{CarbonRelation, CarbonSessionState}
 import org.apache.spark.sql.hive.HiveExternalCatalog._
 
 import org.apache.carbondata.common.logging.LogServiceFactory
@@ -144,11 +144,12 @@ object AlterTableUtil {
    * @param schemaEvolutionEntry
    * @param thriftTable
    * @param sparkSession
-   * @param catalog
+   * @param sessionState
    */
   def updateSchemaInfo(carbonTable: CarbonTable,
       schemaEvolutionEntry: SchemaEvolutionEntry,
-      thriftTable: TableInfo)(sparkSession: SparkSession, catalog: HiveExternalCatalog): Unit = {
+      thriftTable: TableInfo)(sparkSession: SparkSession,
+      sessionState: CarbonSessionState): Unit = {
     val dbName = carbonTable.getDatabaseName
     val tableName = carbonTable.getFactTableName
     CarbonEnv.getInstance(sparkSession).carbonMetastore
@@ -160,7 +161,7 @@ object AlterTableUtil {
     val schema = CarbonEnv.getInstance(sparkSession).carbonMetastore
       .lookupRelation(tableIdentifier)(sparkSession).schema.json
     val schemaParts = prepareSchemaJsonForAlterTable(sparkSession.sparkContext.getConf, schema)
-    catalog.client.runSqlHive(
+    sessionState.metadataHive.runSqlHive(
       s"ALTER TABLE $dbName.$tableName SET TBLPROPERTIES($schemaParts)")
     sparkSession.catalog.refreshTable(tableIdentifier.quotedString)
   }
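
Note: the design choice above is to route Hive DDL through the session-scoped client (CarbonSessionState.metadataHive) instead of the shared HiveExternalCatalog client, so each statement runs against the metastore client owned by the current SparkSession. A minimal sketch of the resulting call pattern, assuming CarbonSessionState is the active session state; the helper name and the statement passed in are placeholders, not part of this commit:

    // Sketch: issue a Hive DDL statement through the current session's own metastore client,
    // which is what updateSchemaInfo does above after this change.
    def runHiveDdl(sparkSession: SparkSession, statement: String): Unit = {
      val sessionState = sparkSession.sessionState.asInstanceOf[CarbonSessionState]
      sessionState.metadataHive.runSqlHive(statement)
    }

    // e.g. runHiveDdl(sparkSession, s"ALTER TABLE $dbName.$tableName SET TBLPROPERTIES($schemaParts)")
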


[39/42] carbondata git commit: Fixed Synchronization issue and improve IUD performance

Posted by ra...@apache.org.
Fixed Synchronization issue and improve IUD performance


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/da952e82
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/da952e82
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/da952e82

Branch: refs/heads/branch-1.1
Commit: da952e82b443839e9c8b7fdeebaed092d3232652
Parents: bbf5dc1
Author: kumarvishal <ku...@gmail.com>
Authored: Mon Jun 12 16:06:24 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:32:15 2017 +0530

----------------------------------------------------------------------
 .../core/datastore/block/AbstractIndex.java     |  41 ++++++++
 .../core/datastore/block/TableBlockInfo.java    |  22 +++-
 .../core/mutate/CarbonUpdateUtil.java           |  16 +++
 .../core/mutate/DeleteDeltaBlockletDetails.java |  15 +--
 .../carbondata/core/mutate/DeleteDeltaVo.java   |  60 +++++++++++
 .../reader/CarbonDeleteFilesDataReader.java     |  47 +++++++++
 .../impl/DictionaryBasedResultCollector.java    |  11 +-
 .../collector/impl/RawBasedResultCollector.java |   7 +-
 ...structureBasedDictionaryResultCollector.java |   7 +-
 .../RestructureBasedRawResultCollector.java     |   7 +-
 .../executor/impl/AbstractQueryExecutor.java    |   9 +-
 .../scan/executor/infos/BlockExecutionInfo.java |  56 ++++++----
 .../scan/executor/infos/DeleteDeltaInfo.java    |  82 +++++++++++++++
 .../core/scan/result/AbstractScannedResult.java |  61 +++++++----
 .../AbstractDetailQueryResultIterator.java      | 103 ++++++++++++++++++-
 .../scan/scanner/AbstractBlockletScanner.java   |   9 --
 .../core/scan/scanner/impl/FilterScanner.java   |  10 --
 .../SegmentUpdateStatusManager.java             |  29 ++++--
 .../datastore/SegmentTaskIndexStoreTest.java    |   2 +-
 .../core/datastore/block/BlockInfoTest.java     |  12 +--
 .../datastore/block/TableBlockInfoTest.java     |  32 +++---
 .../core/datastore/block/TableTaskInfoTest.java |   8 +-
 .../carbondata/core/util/CarbonUtilTest.java    |   4 +-
 .../core/util/DataFileFooterConverterTest.java  |   8 +-
 .../carbondata/hadoop/CarbonInputFormat.java    |  11 +-
 .../carbondata/hadoop/CarbonInputSplit.java     |  39 +++++--
 .../internal/index/impl/InMemoryBTreeIndex.java |   5 +-
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |   3 +-
 .../spark/rdd/CarbonDataRDDFactory.scala        |   2 +-
 .../spark/rdd/CarbonDataRDDFactory.scala        |   2 +-
 .../carbon/datastore/BlockIndexStoreTest.java   |  28 ++---
 31 files changed, 574 insertions(+), 174 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
index b538dc3..4d0e56d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/AbstractIndex.java
@@ -17,11 +17,13 @@
 package org.apache.carbondata.core.datastore.block;
 
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.carbondata.core.cache.Cacheable;
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.mutate.DeleteDeltaVo;
 
 public abstract class AbstractIndex implements Cacheable {
 
@@ -51,6 +53,16 @@ public abstract class AbstractIndex implements Cacheable {
   protected long memorySize;
 
   /**
+   * last fetch delete deltaFile timestamp
+   */
+  private long deleteDeltaTimestamp;
+
+  /**
+   * map of blockletidAndPageId to
+   * deleted rows
+   */
+  private Map<String, DeleteDeltaVo> deletedRowsMap;
+  /**
    * @return the segmentProperties
    */
   public SegmentProperties getSegmentProperties() {
@@ -124,4 +136,33 @@ public abstract class AbstractIndex implements Cacheable {
   public void setMemorySize(long memorySize) {
     this.memorySize = memorySize;
   }
+
+  /**
+   * @return latest deleted delta timestamp
+   */
+  public long getDeleteDeltaTimestamp() {
+    return deleteDeltaTimestamp;
+  }
+
+  /**
+   * set the latest delete delta timestamp
+   * @param deleteDeltaTimestamp
+   */
+  public void setDeleteDeltaTimestamp(long deleteDeltaTimestamp) {
+    this.deleteDeltaTimestamp = deleteDeltaTimestamp;
+  }
+
+  /**
+   * @return the deleted record for block map
+   */
+  public Map<String, DeleteDeltaVo> getDeletedRowsMap() {
+    return deletedRowsMap;
+  }
+
+  /**
+   * @param deletedRowsMap
+   */
+  public void setDeletedRowsMap(Map<String, DeleteDeltaVo> deletedRowsMap) {
+    this.deletedRowsMap = deletedRowsMap;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
index 8fbaa4a..44347cf 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
@@ -72,14 +72,20 @@ public class TableBlockInfo implements Distributable, Serializable {
   private Map<String, String> blockStorageIdMap =
           new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
+  /**
+   * delete delta files path for this block
+   */
+  private String[] deletedDeltaFilePath;
+
   public TableBlockInfo(String filePath, long blockOffset, String segmentId, String[] locations,
-      long blockLength, ColumnarFormatVersion version) {
+      long blockLength, ColumnarFormatVersion version, String[] deletedDeltaFilePath) {
     this.filePath = FileFactory.getUpdatedFilePath(filePath);
     this.blockOffset = blockOffset;
     this.segmentId = segmentId;
     this.locations = locations;
     this.blockLength = blockLength;
     this.version = version;
+    this.deletedDeltaFilePath = deletedDeltaFilePath;
   }
 
   /**
@@ -93,8 +99,9 @@ public class TableBlockInfo implements Distributable, Serializable {
    * @param blockletInfos
    */
   public TableBlockInfo(String filePath, long blockOffset, String segmentId, String[] locations,
-      long blockLength, BlockletInfos blockletInfos, ColumnarFormatVersion version) {
-    this(filePath, blockOffset, segmentId, locations, blockLength, version);
+      long blockLength, BlockletInfos blockletInfos, ColumnarFormatVersion version,
+      String[] deletedDeltaFilePath) {
+    this(filePath, blockOffset, segmentId, locations, blockLength, version, deletedDeltaFilePath);
     this.blockletInfos = blockletInfos;
   }
 
@@ -112,8 +119,9 @@ public class TableBlockInfo implements Distributable, Serializable {
    */
   public TableBlockInfo(String filePath, long blockOffset, String segmentId, String[] locations,
       long blockLength, BlockletInfos blockletInfos, ColumnarFormatVersion version,
-      Map<String, String> blockStorageIdMap) {
-    this(filePath, blockOffset, segmentId, locations, blockLength, blockletInfos, version);
+      Map<String, String> blockStorageIdMap, String[] deletedDeltaFilePath) {
+    this(filePath, blockOffset, segmentId, locations, blockLength, blockletInfos, version,
+        deletedDeltaFilePath);
     this.blockStorageIdMap = blockStorageIdMap;
   }
 
@@ -307,4 +315,8 @@ public class TableBlockInfo implements Distributable, Serializable {
   public void setBlockStorageIdMap(Map<String, String> blockStorageIdMap) {
     this.blockStorageIdMap = blockStorageIdMap;
   }
+
+  public String[] getDeletedDeltaFilePath() {
+    return deletedDeltaFilePath;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
index fef5905..b5a632f 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
@@ -800,4 +800,20 @@ public class CarbonUpdateUtil {
 
   }
 
+  /**
+   * Below method will be used to get the latest delete delta file timestamp
+   * @param deleteDeltaFiles
+   * @return latest delete delta file time stamp
+   */
+  public static long getLatestDeleteDeltaTimestamp(String[] deleteDeltaFiles) {
+    long latestTimestamp = 0;
+    for (int i = 0; i < deleteDeltaFiles.length; i++) {
+      long convertTimeStampToLong = Long.parseLong(
+          CarbonTablePath.DataFileUtil.getTimeStampFromDeleteDeltaFile(deleteDeltaFiles[i]));
+      if (latestTimestamp < convertTimeStampToLong) {
+        latestTimestamp = convertTimeStampToLong;
+      }
+    }
+    return latestTimestamp;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
index 7df5f22..0f54f3a 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
@@ -21,9 +21,6 @@ import java.io.Serializable;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.carbondata.common.logging.LogService;
-import org.apache.carbondata.common.logging.LogServiceFactory;
-
 /**
  * This class stores the blocklet details of delete delta file
  */
@@ -35,12 +32,6 @@ public class DeleteDeltaBlockletDetails implements Serializable {
 
   private Set<Integer> deletedRows;
 
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DeleteDeltaBlockletDetails.class.getName());
-
   public DeleteDeltaBlockletDetails(String id, Integer pageId) {
     this.id = id;
     deletedRows = new TreeSet<Integer>();
@@ -84,7 +75,11 @@ public class DeleteDeltaBlockletDetails implements Serializable {
   }
 
   @Override public int hashCode() {
-    return id.hashCode();
+    return id.hashCode() + pageId.hashCode();
+  }
+
+  public String getBlockletKey() {
+    return this.id + '_' + this.pageId;
   }
 
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaVo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaVo.java b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaVo.java
new file mode 100644
index 0000000..d68e4e9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaVo.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.mutate;
+
+import java.util.BitSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * Class which keeps information about the rows
+ * that were deleted
+ */
+public class DeleteDeltaVo {
+
+  /**
+   * deleted rows bitset
+   */
+  private BitSet bitSet;
+
+  public DeleteDeltaVo() {
+    bitSet = new BitSet();
+  }
+
+  /**
+   * Below method will be used to insert the rows
+   * which are deleted
+   *
+   * @param data
+   */
+  public void insertData(Set<Integer> data) {
+    Iterator<Integer> iterator = data.iterator();
+    while (iterator.hasNext()) {
+      bitSet.set(iterator.next());
+    }
+  }
+
+  /**
+   * below method will be used to check whether the row is deleted or not
+   *
+   * @param counter
+   * @return
+   */
+  public boolean containsRow(int counter) {
+    return bitSet.get(counter);
+  }
+}

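DeleteDeltaVo is essentially a BitSet over deleted row ids, which keeps the per-row checks done while collecting query results cheap. A small usage sketch (the row ids are made up for illustration):

import java.util.HashSet;
import java.util.Set;

import org.apache.carbondata.core.mutate.DeleteDeltaVo;

public final class DeleteDeltaVoExample {
  public static void main(String[] args) {
    DeleteDeltaVo vo = new DeleteDeltaVo();

    // rows reported as deleted by one or more delete delta files
    Set<Integer> deletedRows = new HashSet<Integer>();
    deletedRows.add(3);
    deletedRows.add(42);
    vo.insertData(deletedRows);

    System.out.println(vo.containsRow(3));  // true  -> the collectors skip this row
    System.out.println(vo.containsRow(7));  // false -> the row is returned to the caller
  }
}
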
http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
index e689566..417ad29 100644
--- a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -35,6 +36,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.mutate.DeleteDeltaBlockDetails;
 import org.apache.carbondata.core.mutate.DeleteDeltaBlockletDetails;
+import org.apache.carbondata.core.mutate.DeleteDeltaVo;
 import org.apache.carbondata.core.util.CarbonProperties;
 
 
@@ -120,7 +122,52 @@ public class CarbonDeleteFilesDataReader {
       }
     }
     return pageIdDeleteRowsMap;
+  }
 
+  /**
+   * Below method will be used to read the delete delta files
+   * and get the map of blockletid and page id mapping to deleted
+   * rows
+   *
+   * @param deltaFiles delete delta files array
+   * @return map of blockletid_pageid to deleted rows
+   */
+  public Map<String, DeleteDeltaVo> getDeletedRowsDataVo(String[] deltaFiles) {
+    List<Future<DeleteDeltaBlockDetails>> taskSubmitList = new ArrayList<>();
+    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
+    for (final String deltaFile : deltaFiles) {
+      taskSubmitList.add(executorService.submit(new Callable<DeleteDeltaBlockDetails>() {
+        @Override public DeleteDeltaBlockDetails call() throws IOException {
+          CarbonDeleteDeltaFileReaderImpl deltaFileReader =
+              new CarbonDeleteDeltaFileReaderImpl(deltaFile, FileFactory.getFileType(deltaFile));
+          return deltaFileReader.readJson();
+        }
+      }));
+    }
+    try {
+      executorService.shutdown();
+      executorService.awaitTermination(30, TimeUnit.MINUTES);
+    } catch (InterruptedException e) {
+      LOGGER.error("Error while reading the delete delta files : " + e.getMessage());
+    }
+    Map<String, DeleteDeltaVo> pageIdToBlockLetVo = new HashMap<>();
+    List<DeleteDeltaBlockletDetails> blockletDetails = null;
+    for (int i = 0; i < taskSubmitList.size(); i++) {
+      try {
+        blockletDetails = taskSubmitList.get(i).get().getBlockletDetails();
+      } catch (InterruptedException | ExecutionException e) {
+        throw new RuntimeException(e);
+      }
+      for (DeleteDeltaBlockletDetails blockletDetail : blockletDetails) {
+        DeleteDeltaVo deleteDeltaVo = pageIdToBlockLetVo.get(blockletDetail.getBlockletKey());
+        if (null == deleteDeltaVo) {
+          deleteDeltaVo = new DeleteDeltaVo();
+          pageIdToBlockLetVo.put(blockletDetail.getBlockletKey(), deleteDeltaVo);
+        }
+        deleteDeltaVo.insertData(blockletDetail.getDeletedRows());
+      }
+    }
+    return pageIdToBlockLetVo;
   }
 
   /**

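getDeletedRowsDataVo reads all delta files in a thread pool and folds their blocklet details into one map keyed by DeleteDeltaBlockletDetails.getBlockletKey(), i.e. "<blockletId>_<pageId>". A hedged sketch of how a caller could consume that map (the delta file path and the ids are placeholders, not real store paths):

import java.util.Map;

import org.apache.carbondata.core.mutate.DeleteDeltaVo;
import org.apache.carbondata.core.reader.CarbonDeleteFilesDataReader;

public final class DeletedRowsLookup {
  public static void main(String[] args) {
    // placeholder path; in practice these come from SegmentUpdateStatusManager
    String[] deltaFiles = { "/store/db/table/Fact/Part0/Segment_0/some.deletedelta" };

    Map<String, DeleteDeltaVo> deletedRows =
        new CarbonDeleteFilesDataReader().getDeletedRowsDataVo(deltaFiles);

    // key format is "<blockletId>_<pageId>"; a missing key means no deleted row on that page
    DeleteDeltaVo page = deletedRows.get("0_0");
    if (page != null && page.containsRow(15)) {
      System.out.println("row 15 of blocklet 0, page 0 was deleted");
    }
  }
}
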
http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index d4d16d0..dba92ad 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -17,9 +17,11 @@
 package org.apache.carbondata.core.scan.collector.impl;
 
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
@@ -90,8 +92,6 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
     int[] surrogateResult;
     String[] noDictionaryKeys;
     byte[][] complexTypeKeyArray;
-    BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
-        scannedResult.getDeleteDeltaDataCache();
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       Object[] row = new Object[queryDimensions.length + queryMeasures.length];
       if (isDimensionExists) {
@@ -108,8 +108,7 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
       } else {
         scannedResult.incrementCounter();
       }
-      if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId(), scannedResult.getCurrentPageCounter())) {
+      if (scannedResult.containsDeletedRow(scannedResult.getCurrentRowId())) {
         continue;
       }
       fillMeasureData(scannedResult, row);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
index 478dc8c..3e82257 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
@@ -20,7 +20,6 @@ import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.model.QueryMeasure;
@@ -54,15 +53,11 @@ public class RawBasedResultCollector extends AbstractScannedResultCollector {
   @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
     List<Object[]> listBasedResult = new ArrayList<>(batchSize);
     QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
-    BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
-        scannedResult.getDeleteDeltaDataCache();
     // scan the record and add to list
     int rowCounter = 0;
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       scanResultAndGetData(scannedResult);
-      if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId(),
-              scannedResult.getCurrentPageCounter())) {
+      if (scannedResult.containsDeletedRow(scannedResult.getCurrentRowId())) {
         continue;
       }
       prepareRow(scannedResult, listBasedResult, queryMeasures);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
index 4fa1494..8f89760 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.result.AbstractScannedResult;
@@ -50,8 +49,6 @@ public class RestructureBasedDictionaryResultCollector extends DictionaryBasedRe
     int[] surrogateResult;
     String[] noDictionaryKeys;
     byte[][] complexTypeKeyArray;
-    BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
-        scannedResult.getDeleteDeltaDataCache();
     Map<Integer, GenericQueryType> comlexDimensionInfoMap =
         tableBlockExecutionInfos.getComlexDimensionInfoMap();
     while (scannedResult.hasNext() && rowCounter < batchSize) {
@@ -80,9 +77,7 @@ public class RestructureBasedDictionaryResultCollector extends DictionaryBasedRe
       } else {
         scannedResult.incrementCounter();
       }
-      if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId(),
-              scannedResult.getCurrentPageCounter())) {
+      if (scannedResult.containsDeletedRow(scannedResult.getCurrentRowId())) {
         continue;
       }
       fillMeasureData(scannedResult, row);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
index 2de74fa..479a684 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
@@ -21,7 +21,6 @@ import java.util.List;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
@@ -152,15 +151,11 @@ public class RestructureBasedRawResultCollector extends RawBasedResultCollector
   @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
     List<Object[]> listBasedResult = new ArrayList<>(batchSize);
     QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getActualQueryMeasures();
-    BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
-        scannedResult.getDeleteDeltaDataCache();
     // scan the record and add to list
     int rowCounter = 0;
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       scanResultAndGetData(scannedResult);
-      if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId(),
-              scannedResult.getCurrentPageCounter())) {
+      if (scannedResult.containsDeletedRow(scannedResult.getCurrentRowId())) {
         continue;
       }
       // re-fill dictionary and no dictionary key arrays for the newly added columns

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 2a5c342..ba7530d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -193,7 +193,8 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
           getBlockExecutionInfoForBlock(queryModel, queryProperties.dataBlocks.get(i),
               queryModel.getTableBlockInfos().get(i).getBlockletInfos().getStartBlockletNumber(),
               queryModel.getTableBlockInfos().get(i).getBlockletInfos().getNumberOfBlockletToScan(),
-              queryModel.getTableBlockInfos().get(i).getFilePath()));
+              queryModel.getTableBlockInfos().get(i).getFilePath(),
+              queryModel.getTableBlockInfos().get(i).getDeletedDeltaFilePath()));
     }
     if (null != queryModel.getStatisticsRecorder()) {
       QueryStatistic queryStatistic = new QueryStatistic();
@@ -214,7 +215,8 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
    * @throws QueryExecutionException any failure during block info creation
    */
   protected BlockExecutionInfo getBlockExecutionInfoForBlock(QueryModel queryModel,
-      AbstractIndex blockIndex, int startBlockletIndex, int numberOfBlockletToScan, String filePath)
+      AbstractIndex blockIndex, int startBlockletIndex, int numberOfBlockletToScan, String filePath,
+      String[] deleteDeltaFiles)
       throws QueryExecutionException {
     BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
     SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
@@ -232,6 +234,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
             queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier()).getFactDir()
         .length() + 1;
     blockExecutionInfo.setBlockId(filePath.substring(tableFactPathLength));
+    blockExecutionInfo.setDeleteDeltaFilePath(deleteDeltaFiles);
     blockExecutionInfo.setStartBlockletIndex(startBlockletIndex);
     blockExecutionInfo.setNumberOfBlockletToScan(numberOfBlockletToScan);
     blockExecutionInfo.setQueryDimensions(currentBlockQueryDimensions
@@ -360,8 +363,6 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     // setting the no dictionary column block indexes
     blockExecutionInfo.setNoDictionaryBlockIndexes(ArrayUtils.toPrimitive(
         noDictionaryColumnBlockIndex.toArray(new Integer[noDictionaryColumnBlockIndex.size()])));
-    // setting column id to dictionary mapping
-    blockExecutionInfo.setColumnIdToDcitionaryMapping(queryProperties.columnToDictionayMapping);
     // setting each column value size
     blockExecutionInfo.setEachColumnValueSize(segmentProperties.getEachDimColumnValueSize());
     blockExecutionInfo.setComplexColumnParentBlockIndexes(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
index b294b58..7d08dda 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/BlockExecutionInfo.java
@@ -18,12 +18,12 @@ package org.apache.carbondata.core.scan.executor.infos;
 
 import java.util.Map;
 
-import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.datastore.IndexKey;
 import org.apache.carbondata.core.datastore.block.AbstractIndex;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.mutate.DeleteDeltaVo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
 import org.apache.carbondata.core.scan.model.QueryDimension;
@@ -101,12 +101,6 @@ public class BlockExecutionInfo {
   private int[] projectionListMeasureIndexes;
 
   /**
-   * this will be used to update the older block fixed length keys with the
-   * new block fixed length key
-   */
-  private KeyStructureInfo keyStructureInfo;
-
-  /**
    * first block from which query execution will start
    */
   private DataRefNode firstDataBlock;
@@ -146,12 +140,6 @@ public class BlockExecutionInfo {
   private Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo;
 
   /**
-   * mapping of dictionary dimension to its dictionary mapping which will be
-   * used to get the actual data from dictionary for aggregation, sorting
-   */
-  private Map<String, Dictionary> columnIdToDcitionaryMapping;
-
-  /**
    * filter tree to execute the filter
    */
   private FilterExecuter filterExecuterTree;
@@ -230,6 +218,13 @@ public class BlockExecutionInfo {
    */
   private AbsoluteTableIdentifier absoluteTableIdentifier;
 
+  /**
+   * delete delta file path
+   */
+  private String[] deleteDeltaFilePath;
+
+  private Map<String, DeleteDeltaVo> deletedRecordsMap;
+
   public AbsoluteTableIdentifier getAbsoluteTableIdentifier() {
     return absoluteTableIdentifier;
   }
@@ -484,13 +479,6 @@ public class BlockExecutionInfo {
     this.columnGroupToKeyStructureInfo = columnGroupToKeyStructureInfo;
   }
 
-  /**
-   * @param columnIdToDcitionaryMapping the columnIdToDcitionaryMapping to set
-   */
-  public void setColumnIdToDcitionaryMapping(Map<String, Dictionary> columnIdToDcitionaryMapping) {
-    this.columnIdToDcitionaryMapping = columnIdToDcitionaryMapping;
-  }
-
   public boolean isRawRecordDetailQuery() {
     return isRawRecordDetailQuery;
   }
@@ -643,4 +631,32 @@ public class BlockExecutionInfo {
     this.projectionListMeasureIndexes = projectionListMeasureIndexes;
   }
 
+  /**
+   * @return delete delta files
+   */
+  public String[] getDeleteDeltaFilePath() {
+    return deleteDeltaFilePath;
+  }
+
+  /**
+   * set the delete delta files
+   * @param deleteDeltaFilePath
+   */
+  public void setDeleteDeltaFilePath(String[] deleteDeltaFilePath) {
+    this.deleteDeltaFilePath = deleteDeltaFilePath;
+  }
+
+  /**
+   * @return deleted record map
+   */
+  public Map<String, DeleteDeltaVo> getDeletedRecordsMap() {
+    return deletedRecordsMap;
+  }
+
+  /**
+   * @param deletedRecordsMap
+   */
+  public void setDeletedRecordsMap(Map<String, DeleteDeltaVo> deletedRecordsMap) {
+    this.deletedRecordsMap = deletedRecordsMap;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/DeleteDeltaInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/DeleteDeltaInfo.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/DeleteDeltaInfo.java
new file mode 100644
index 0000000..52fa529
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/infos/DeleteDeltaInfo.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.scan.executor.infos;
+
+import java.util.Arrays;
+
+import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
+
+/**
+ * class to hold information about delete delta files
+ */
+public class DeleteDeltaInfo {
+
+  /**
+   * delete delta files
+   */
+  private String[] deleteDeltaFile;
+
+  /**
+   * latest delete delta file timestamp
+   */
+  private long latestDeleteDeltaFileTimestamp;
+
+  public DeleteDeltaInfo(String[] deleteDeltaFile) {
+    this.deleteDeltaFile = deleteDeltaFile;
+    this.latestDeleteDeltaFileTimestamp =
+        CarbonUpdateUtil.getLatestDeleteDeltaTimestamp(deleteDeltaFile);
+  }
+
+  public String[] getDeleteDeltaFile() {
+    return deleteDeltaFile;
+  }
+
+  public long getLatestDeleteDeltaFileTimestamp() {
+    return latestDeleteDeltaFileTimestamp;
+  }
+
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + Arrays.hashCode(deleteDeltaFile);
+    result =
+        prime * result + (int) (latestDeleteDeltaFileTimestamp ^ (latestDeleteDeltaFileTimestamp
+            >>> 32));
+    return result;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    DeleteDeltaInfo other = (DeleteDeltaInfo) obj;
+    if (!Arrays.equals(deleteDeltaFile, other.deleteDeltaFile)) {
+      return false;
+    }
+    if (latestDeleteDeltaFileTimestamp != other.latestDeleteDeltaFileTimestamp) {
+      return false;
+    }
+    return true;
+  }
+
+}

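Because equals and hashCode are defined over the delta file array and the timestamp derived from it, two tasks that see the same set of delete delta files build equal DeleteDeltaInfo instances, which is what lets the query iterator below use DeleteDeltaInfo as the key of a shared lock map. A minimal sketch of that property (the file name is hypothetical and assumed to end in a parsable timestamp, as delete delta file names do):

import org.apache.carbondata.core.scan.executor.infos.DeleteDeltaInfo;

public final class DeleteDeltaInfoKeyExample {
  public static void main(String[] args) {
    // hypothetical delete delta file name carrying a timestamp
    String[] files = { "part-0-0_batchno0-0-1496300000000.deletedelta" };

    DeleteDeltaInfo a = new DeleteDeltaInfo(files);
    DeleteDeltaInfo b = new DeleteDeltaInfo(files.clone());

    // equal file arrays produce equal keys, so both tasks end up sharing one lock object
    System.out.println(a.equals(b));                   // true
    System.out.println(a.hashCode() == b.hashCode());  // true
  }
}
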
http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
index 1dda1aa..c24b73c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
@@ -25,11 +25,13 @@ import java.util.Map;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
+import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
+import org.apache.carbondata.core.mutate.DeleteDeltaVo;
+import org.apache.carbondata.core.mutate.TupleIdEnum;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
@@ -125,7 +127,20 @@ public abstract class AbstractScannedResult {
    */
   private int[] complexParentBlockIndexes;
 
-  protected BlockletLevelDeleteDeltaDataCache blockletDeleteDeltaCache;
+  /**
+   * blockletid + pagenumber to deleted record map
+   */
+  private Map<String, DeleteDeltaVo> deletedRecordMap;
+
+  /**
+   * current page delete delta vo
+   */
+  private DeleteDeltaVo currentDeleteDeltaVo;
+
+  /**
+   * actual blocklet number
+   */
+  private String blockletNumber;
 
   public AbstractScannedResult(BlockExecutionInfo blockExecutionInfo) {
     this.fixedLengthKeySize = blockExecutionInfo.getFixedLengthKeySize();
@@ -135,6 +150,7 @@ public abstract class AbstractScannedResult {
     this.complexParentIndexToQueryMap = blockExecutionInfo.getComlexDimensionInfoMap();
     this.complexParentBlockIndexes = blockExecutionInfo.getComplexColumnParentBlockIndexes();
     this.totalDimensionsSize = blockExecutionInfo.getQueryDimensions().length;
+    this.deletedRecordMap = blockExecutionInfo.getDeletedRecordsMap();
   }
 
   /**
@@ -393,6 +409,12 @@ public abstract class AbstractScannedResult {
    */
   public void setBlockletId(String blockletId) {
     this.blockletId = CarbonTablePath.getShortBlockId(blockletId);
+    blockletNumber = CarbonUpdateUtil.getRequiredFieldFromTID(blockletId, TupleIdEnum.BLOCKLET_ID);
+    // if deleted records map is present for this block
+    // then get the first page deleted vo
+    if (null != deletedRecordMap) {
+      currentDeleteDeltaVo = deletedRecordMap.get(blockletNumber + '_' + pageCounter);
+    }
   }
 
   /**
@@ -457,6 +479,9 @@ public abstract class AbstractScannedResult {
       pageCounter++;
       rowCounter = 0;
       currentRow = -1;
+      if (null != deletedRecordMap) {
+        currentDeleteDeltaVo = deletedRecordMap.get(blockletNumber + '_' + pageCounter);
+      }
       return hasNext();
     }
     return false;
@@ -629,21 +654,6 @@ public abstract class AbstractScannedResult {
   public abstract String[] getNoDictionaryKeyStringArray();
 
   /**
-   * @return BlockletLevelDeleteDeltaDataCache.
-   */
-  public BlockletLevelDeleteDeltaDataCache getDeleteDeltaDataCache() {
-    return blockletDeleteDeltaCache;
-  }
-
-  /**
-   * @param blockletDeleteDeltaCache
-   */
-  public void setBlockletDeleteDeltaCache(
-      BlockletLevelDeleteDeltaDataCache blockletDeleteDeltaCache) {
-    this.blockletDeleteDeltaCache = blockletDeleteDeltaCache;
-  }
-
-  /**
    * Mark the filtered rows in columnar batch. These rows will not be added to vector batches later.
    * @param columnarBatch
    * @param startRow
@@ -653,11 +663,11 @@ public abstract class AbstractScannedResult {
   public int markFilteredRows(CarbonColumnarBatch columnarBatch, int startRow, int size,
       int vectorOffset) {
     int rowsFiltered = 0;
-    if (blockletDeleteDeltaCache != null) {
+    if (currentDeleteDeltaVo != null) {
       int len = startRow + size;
       for (int i = startRow; i < len; i++) {
         int rowId = rowMapping != null ? rowMapping[pageCounter][i] : i;
-        if (blockletDeleteDeltaCache.contains(rowId, pageCounter)) {
+        if (currentDeleteDeltaVo.containsRow(rowId)) {
           columnarBatch.markFiltered(vectorOffset);
           rowsFiltered++;
         }
@@ -666,4 +676,17 @@ public abstract class AbstractScannedResult {
     }
     return rowsFiltered;
   }
+
+  /**
+   * Below method will be used to check row got deleted
+   *
+   * @param rowId
+   * @return is present in deleted row
+   */
+  public boolean containsDeletedRow(int rowId) {
+    if (null != currentDeleteDeltaVo) {
+      return currentDeleteDeltaVo.containsRow(rowId);
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
index a0823af..92e9594 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -18,6 +18,8 @@ package org.apache.carbondata.core.scan.result.iterator;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.carbondata.common.CarbonIterator;
@@ -27,9 +29,13 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.datastore.DataRefNodeFinder;
 import org.apache.carbondata.core.datastore.FileHolder;
+import org.apache.carbondata.core.datastore.block.AbstractIndex;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.datastore.impl.btree.BTreeDataRefNodeFinder;
+import org.apache.carbondata.core.mutate.DeleteDeltaVo;
+import org.apache.carbondata.core.reader.CarbonDeleteFilesDataReader;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.core.scan.executor.infos.DeleteDeltaInfo;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.scan.processor.AbstractDataBlockIterator;
 import org.apache.carbondata.core.scan.processor.impl.DataBlockIteratorImpl;
@@ -53,6 +59,9 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
   private static final LogService LOGGER =
       LogServiceFactory.getLogService(AbstractDetailQueryResultIterator.class.getName());
 
+  private static final Map<DeleteDeltaInfo, Object> deleteDeltaToLockObjectMap =
+      new ConcurrentHashMap<>();
+
   protected ExecutorService execService;
   /**
    * execution info of the block
@@ -77,7 +86,7 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
   /**
    * queryStatisticsModel to store query statistics object
    */
-  QueryStatisticsModel queryStatisticsModel;
+  private QueryStatisticsModel queryStatisticsModel;
 
   public AbstractDetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel,
       ExecutorService execService) {
@@ -105,13 +114,24 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
 
   private void intialiseInfos() {
     for (BlockExecutionInfo blockInfo : blockExecutionInfos) {
-      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
+      Map<String, DeleteDeltaVo> deletedRowsMap = null;
+      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize(),
+          blockInfo.getDataBlock().getSegmentProperties().getNumberOfSortColumns(),
+          blockInfo.getDataBlock().getSegmentProperties().getNumberOfNoDictSortColumns());
+      // if delete delta file is present
+      if (null != blockInfo.getDeleteDeltaFilePath() && 0 != blockInfo
+          .getDeleteDeltaFilePath().length) {
+        DeleteDeltaInfo deleteDeltaInfo = new DeleteDeltaInfo(blockInfo.getDeleteDeltaFilePath());
+        // read and get the delete detail block details
+        deletedRowsMap = getDeleteDeltaDetails(blockInfo.getDataBlock(), deleteDeltaInfo);
+        // set the deleted row to block execution info
+        blockInfo.setDeletedRecordsMap(deletedRowsMap);
+      }
       DataRefNode startDataBlock = finder
           .findFirstDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getStartKey());
       while (startDataBlock.nodeNumber() < blockInfo.getStartBlockletIndex()) {
         startDataBlock = startDataBlock.getNextDataRefNode();
       }
-
       long numberOfBlockToScan = blockInfo.getNumberOfBlockletToScan();
       //if number of block is less than 0 then take end block.
       if (numberOfBlockToScan <= 0) {
@@ -124,6 +144,83 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
     }
   }
 
+  /**
+   * Below method will be used to get the delete delta rows for a block
+   *
+   * @param dataBlock       data block
+   * @param deleteDeltaInfo delete delta info
+   * @return blockletid + pageid to deleted row mapping
+   */
+  private Map<String, DeleteDeltaVo> getDeleteDeltaDetails(AbstractIndex dataBlock,
+      DeleteDeltaInfo deleteDeltaInfo) {
+    // if the data block's delete delta timestamp is not older than the latest delete delta
+    // file timestamp then its cached deleted rows are still valid, so return them
+    if (dataBlock.getDeleteDeltaTimestamp() >= deleteDeltaInfo
+        .getLatestDeleteDeltaFileTimestamp()) {
+      return dataBlock.getDeletedRowsMap();
+    }
+    CarbonDeleteFilesDataReader carbonDeleteDeltaFileReader = null;
+    // get the lock object so in case of concurrent query only one task will read the delete delta
+    // files other tasks will wait
+    Object lockObject = deleteDeltaToLockObjectMap.get(deleteDeltaInfo);
+    // if lock object is null then add a lock object
+    if (null == lockObject) {
+      synchronized (deleteDeltaToLockObjectMap) {
+        // double checking
+        lockObject = deleteDeltaToLockObjectMap.get(deleteDeltaInfo);
+        if (null == lockObject) {
+          lockObject = new Object();
+          deleteDeltaToLockObjectMap.put(deleteDeltaInfo, lockObject);
+        }
+      }
+    }
+    // double checking to check the deleted rows is already present or not
+    if (dataBlock.getDeleteDeltaTimestamp() < deleteDeltaInfo.getLatestDeleteDeltaFileTimestamp()) {
+      // if not then acquire the lock
+      synchronized (lockObject) {
+        // check the timestamp again
+        if (dataBlock.getDeleteDeltaTimestamp() < deleteDeltaInfo
+            .getLatestDeleteDeltaFileTimestamp()) {
+          // read the delete delta files
+          carbonDeleteDeltaFileReader = new CarbonDeleteFilesDataReader();
+          Map<String, DeleteDeltaVo> deletedRowsMap = carbonDeleteDeltaFileReader
+              .getDeletedRowsDataVo(deleteDeltaInfo.getDeleteDeltaFile());
+          setDeltedDeltaBoToDataBlock(deleteDeltaInfo, deletedRowsMap, dataBlock);
+          // remove the lock
+          deleteDeltaToLockObjectMap.remove(deleteDeltaInfo);
+          return deletedRowsMap;
+        } else {
+          return dataBlock.getDeletedRowsMap();
+        }
+      }
+    } else {
+      return dataBlock.getDeletedRowsMap();
+    }
+  }
+
+  /**
+   * Below method will be used to set deleted records map to data block
+   * based on latest delta file timestamp
+   *
+   * @param deleteDeltaInfo
+   * @param deletedRecordsMap
+   * @param dataBlock
+   */
+  private void setDeltedDeltaBoToDataBlock(DeleteDeltaInfo deleteDeltaInfo,
+      Map<String, DeleteDeltaVo> deletedRecordsMap, AbstractIndex dataBlock) {
+    // check if timestamp of data block is less than the latest delete delta timestamp
+    // then update the delete delta details and timestamp in data block
+    if (dataBlock.getDeleteDeltaTimestamp() < deleteDeltaInfo.getLatestDeleteDeltaFileTimestamp()) {
+      synchronized (dataBlock) {
+        if (dataBlock.getDeleteDeltaTimestamp() < deleteDeltaInfo
+            .getLatestDeleteDeltaFileTimestamp()) {
+          dataBlock.setDeletedRowsMap(deletedRecordsMap);
+          dataBlock.setDeleteDeltaTimestamp(deleteDeltaInfo.getLatestDeleteDeltaFileTimestamp());
+        }
+      }
+    }
+  }
+
   @Override public boolean hasNext() {
     if ((dataBlockIterator != null && dataBlockIterator.hasNext())) {
       return true;

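getDeleteDeltaDetails above combines two ideas: the deleted-rows map is cached on the AbstractIndex itself, guarded by its delete delta timestamp, and a shared ConcurrentHashMap of per-DeleteDeltaInfo lock objects ensures that only one concurrent task pays for reading the delta files while the others wait and then reuse the result. A stripped-down sketch of that per-key double-checked locking pattern, using a generic cache map instead of the CarbonData types (PerKeyLoader and its method names are hypothetical):

import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class PerKeyLoader<K, V> {

  private final ConcurrentMap<K, Object> locks = new ConcurrentHashMap<K, Object>();
  private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<K, V>();

  /** Loads the value for a key exactly once, even when many tasks ask for it concurrently. */
  public V get(K key, Callable<V> loader) throws Exception {
    V cached = cache.get(key);
    if (cached != null) {
      return cached;                        // fast path: some other task already loaded it
    }
    // one lock object per key, shared by every task interested in that key
    Object created = new Object();
    Object lock = locks.putIfAbsent(key, created);
    if (lock == null) {
      lock = created;
    }
    synchronized (lock) {
      cached = cache.get(key);              // double check inside the lock
      if (cached == null) {
        cached = loader.call();             // expensive read, done by exactly one task
        cache.put(key, cached);
        locks.remove(key);                  // lock object no longer needed, mirroring the patch
      }
      return cached;
    }
  }
}
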
http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
index 0fb9782..f3d1336 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
@@ -23,8 +23,6 @@ import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
-import org.apache.carbondata.core.mutate.data.BlockletDeleteDeltaCacheLoader;
-import org.apache.carbondata.core.mutate.data.DeleteDeltaCacheLoaderIntf;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
@@ -114,13 +112,6 @@ public abstract class AbstractBlockletScanner implements BlockletScanner {
       }
     }
     scannedResult.setNumberOfRows(numberOfRows);
-    // loading delete data cache in blockexecutioninfo instance
-    DeleteDeltaCacheLoaderIntf deleteCacheLoader =
-        new BlockletDeleteDeltaCacheLoader(scannedResult.getBlockletId(),
-            blocksChunkHolder.getDataBlock(), blockExecutionInfo.getAbsoluteTableIdentifier());
-    deleteCacheLoader.loadDeleteDeltaFileDataToCache();
-    scannedResult
-        .setBlockletDeleteDeltaCache(blocksChunkHolder.getDataBlock().getDeleteDeltaDataCache());
     scannedResult.setRawColumnChunks(dimensionRawColumnChunks);
     // adding statistics for carbon scan time
     QueryStatistic scanTime = queryStatisticsModel.getStatisticsTypeAndObjMap()

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
index 8f14b85..e710e40 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
@@ -26,8 +26,6 @@ import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
-import org.apache.carbondata.core.mutate.data.BlockletDeleteDeltaCacheLoader;
-import org.apache.carbondata.core.mutate.data.DeleteDeltaCacheLoaderIntf;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
@@ -198,17 +196,9 @@ public class FilterScanner extends AbstractBlockletScanner {
         indexesGroup[k] = indexes;
       }
     }
-    // loading delete data cache in blockexecutioninfo instance
-    DeleteDeltaCacheLoaderIntf deleteCacheLoader =
-        new BlockletDeleteDeltaCacheLoader(scannedResult.getBlockletId(),
-            blocksChunkHolder.getDataBlock(), blockExecutionInfo.getAbsoluteTableIdentifier());
-    deleteCacheLoader.loadDeleteDeltaFileDataToCache();
-    scannedResult
-        .setBlockletDeleteDeltaCache(blocksChunkHolder.getDataBlock().getDeleteDeltaDataCache());
     FileHolder fileReader = blocksChunkHolder.getFileReader();
     int[][] allSelectedDimensionBlocksIndexes =
         blockExecutionInfo.getAllSelectedDimensionBlocksIndexes();
-
     long dimensionReadTime = System.currentTimeMillis();
     DimensionRawColumnChunk[] projectionListDimensionChunk = blocksChunkHolder.getDataBlock()
         .getDimensionChunks(fileReader, allSelectedDimensionBlocksIndexes);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
index 6fab563..5e6e8de 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
@@ -261,7 +261,22 @@ public class SegmentUpdateStatusManager {
     return dataReader.getDeleteDataFromAllFiles(deltaFiles, blockletId);
   }
 
-
+  /**
+   * Below method will be used to get all the delete delta files based on block name
+   *
+   * @param blockFilePath actual block filePath
+   * @return all delete delta files
+   * @throws Exception
+   */
+  public String[] getDeleteDeltaFilePath(String blockFilePath) throws Exception {
+    int tableFactPathLength = CarbonStorePath
+        .getCarbonTablePath(absoluteTableIdentifier.getStorePath(),
+            absoluteTableIdentifier.getCarbonTableIdentifier()).getFactDir().length() + 1;
+    String blockName = blockFilePath.substring(tableFactPathLength);
+    String tupleId = CarbonTablePath.getShortBlockId(blockName);
+    return getDeltaFiles(tupleId, CarbonCommonConstants.DELETE_DELTA_FILE_EXT)
+        .toArray(new String[0]);
+  }
 
   /**
    * Returns all delta file paths of specified block
@@ -291,11 +306,8 @@ public class SegmentUpdateStatusManager {
       //blockName without timestamp
       final String blockNameFromTuple =
           blockNameWithoutExtn.substring(0, blockNameWithoutExtn.lastIndexOf("-"));
-      SegmentUpdateDetails[] listOfSegmentUpdateDetailsArray =
-          readLoadMetadata();
-      return getDeltaFiles(file, blockNameFromTuple, listOfSegmentUpdateDetailsArray, extension,
+      return getDeltaFiles(file, blockNameFromTuple, extension,
           segment);
-
     } catch (Exception ex) {
       String errorMsg = "Invalid tuple id " + tupleId;
       LOG.error(errorMsg);
@@ -345,12 +357,11 @@ public class SegmentUpdateStatusManager {
    * @param extension
    * @return
    */
-  public List<String> getDeltaFiles(CarbonFile blockDir, final String blockNameFromTuple,
-      SegmentUpdateDetails[] listOfSegmentUpdateDetailsArray,
+  private List<String> getDeltaFiles(CarbonFile blockDir, final String blockNameFromTuple,
       final String extension,
       String segment) {
-    List<String> deleteFileList = null;
-    for (SegmentUpdateDetails block : listOfSegmentUpdateDetailsArray) {
+    List<String> deleteFileList = new ArrayList<>();
+    for (SegmentUpdateDetails block : updateDetails) {
       if (block.getBlockName().equalsIgnoreCase(blockNameFromTuple) && block.getSegmentName()
           .equalsIgnoreCase(segment) && !CarbonUpdateUtil.isBlockInvalid(block.getStatus())) {
         final long deltaStartTimestamp = getStartTimeOfDeltaFile(extension, block);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
index c66398c..982fb50 100644
--- a/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
@@ -62,7 +62,7 @@ public class SegmentTaskIndexStoreTest {
         <TableSegmentUniqueIdentifier, SegmentTaskIndexWrapper>
             createCache(CacheType.DRIVER_BTREE, "");
     tableBlockInfo = new TableBlockInfo("file", 0L, "SG100", locations, 10L,
-        ColumnarFormatVersion.valueOf(version));
+        ColumnarFormatVersion.valueOf(version), null);
     absoluteTableIdentifier = new AbsoluteTableIdentifier("/tmp",
         new CarbonTableIdentifier("testdatabase", "testtable", "TB100"));
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/datastore/block/BlockInfoTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/block/BlockInfoTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/block/BlockInfoTest.java
index 08c22ec..1b7f106 100644
--- a/core/src/test/java/org/apache/carbondata/core/datastore/block/BlockInfoTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/block/BlockInfoTest.java
@@ -27,7 +27,7 @@ public class BlockInfoTest {
   static BlockInfo blockInfo;
 
   @BeforeClass public static void setup() {
-    blockInfo = new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 6, ColumnarFormatVersion.V1));
+    blockInfo = new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 6, ColumnarFormatVersion.V1, null));
   }
 
   @Test public void hashCodeTest() {
@@ -43,7 +43,7 @@ public class BlockInfoTest {
 
   @Test public void equalsTestWithSimilarObject() {
     BlockInfo blockInfoTest =
-        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 6, ColumnarFormatVersion.V1));
+        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 6, ColumnarFormatVersion.V1, null));
     Boolean res = blockInfo.equals(blockInfoTest);
     assert (res);
   }
@@ -60,28 +60,28 @@ public class BlockInfoTest {
 
   @Test public void equalsTestWithDifferentSegmentId() {
     BlockInfo blockInfoTest =
-        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "diffSegmentId", null, 6, ColumnarFormatVersion.V1));
+        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "diffSegmentId", null, 6, ColumnarFormatVersion.V1, null));
     Boolean res = blockInfo.equals(blockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDifferentOffset() {
     BlockInfo blockInfoTest =
-        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 62, "segmentId", null, 6, ColumnarFormatVersion.V1));
+        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 62, "segmentId", null, 6, ColumnarFormatVersion.V1, null));
     Boolean res = blockInfo.equals(blockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDifferentBlockLength() {
     BlockInfo blockInfoTest =
-        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 62, ColumnarFormatVersion.V1));
+        new BlockInfo(new TableBlockInfo("/filePath.carbondata", 6, "segmentId", null, 62, ColumnarFormatVersion.V1, null));
     Boolean res = blockInfo.equals(blockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDiffFilePath() {
     BlockInfo blockInfoTest =
-        new BlockInfo(new TableBlockInfo("/diffFilePath.carbondata", 6, "segmentId", null, 62, ColumnarFormatVersion.V1));
+        new BlockInfo(new TableBlockInfo("/diffFilePath.carbondata", 6, "segmentId", null, 62, ColumnarFormatVersion.V1, null));
     Boolean res = blockInfoTest.equals(blockInfo);
     assert (!res);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/datastore/block/TableBlockInfoTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/block/TableBlockInfoTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/block/TableBlockInfoTest.java
index 840287e..f4553a6 100644
--- a/core/src/test/java/org/apache/carbondata/core/datastore/block/TableBlockInfoTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/block/TableBlockInfoTest.java
@@ -33,8 +33,8 @@ public class TableBlockInfoTest {
   static TableBlockInfo tableBlockInfos;
 
   @BeforeClass public static void setup() {
-    tableBlockInfo = new TableBlockInfo("filePath", 4, "segmentId", null, 6, ColumnarFormatVersion.V1);
-    tableBlockInfos = new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 2, 2), ColumnarFormatVersion.V1);
+    tableBlockInfo = new TableBlockInfo("filePath", 4, "segmentId", null, 6, ColumnarFormatVersion.V1, null);
+    tableBlockInfos = new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 2, 2), ColumnarFormatVersion.V1, null);
   }
 
   @Test public void equalTestWithSameObject() {
@@ -43,7 +43,7 @@ public class TableBlockInfoTest {
   }
 
   @Test public void equalTestWithSimilarObject() {
-    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "segmentId", null, 6, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "segmentId", null, 6, ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfo.equals(tableBlockInfoTest);
     assert (res);
   }
@@ -59,52 +59,52 @@ public class TableBlockInfoTest {
   }
 
   @Test public void equlsTestWithDiffSegmentId() {
-    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "diffsegmentId", null, 6, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "diffsegmentId", null, 6, ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfo.equals(tableBlockInfoTest);
     assert (!res);
   }
 
   @Test public void equlsTestWithDiffBlockOffset() {
-    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 6, "segmentId", null, 6, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 6, "segmentId", null, 6, ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfo.equals(tableBlockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDiffBlockLength() {
-    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "segmentId", null, 4, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfoTest = new TableBlockInfo("filePath", 4, "segmentId", null, 4, ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfo.equals(tableBlockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDiffBlockletNumber() {
     TableBlockInfo tableBlockInfoTest =
-        new TableBlockInfo("filepath", 6, "segmentId", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("filepath", 6, "segmentId", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfos.equals(tableBlockInfoTest);
     assert (!res);
   }
 
   @Test public void equalsTestWithDiffFilePath() {
     TableBlockInfo tableBlockInfoTest =
-        new TableBlockInfo("difffilepath", 6, "segmentId", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("difffilepath", 6, "segmentId", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     Boolean res = tableBlockInfos.equals(tableBlockInfoTest);
     assert (!res);
   }
 
   @Test public void compareToTestForSegmentId() {
     TableBlockInfo tableBlockInfo =
-        new TableBlockInfo("difffilepath", 6, "5", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("difffilepath", 6, "5", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     int res = tableBlockInfos.compareTo(tableBlockInfo);
     int expectedResult = 2;
     assertEquals(res, expectedResult);
 
     TableBlockInfo tableBlockInfo1 =
-        new TableBlockInfo("difffilepath", 6, "6", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("difffilepath", 6, "6", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     int res1 = tableBlockInfos.compareTo(tableBlockInfo1);
     int expectedResult1 = -1;
     assertEquals(res1, expectedResult1);
 
     TableBlockInfo tableBlockInfo2 =
-        new TableBlockInfo("difffilepath", 6, "4", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("difffilepath", 6, "4", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     int res2 = tableBlockInfos.compareTo(tableBlockInfo2);
     int expectedresult2 = 1;
     assertEquals(res2, expectedresult2);
@@ -129,18 +129,18 @@ public class TableBlockInfoTest {
 
     };
 
-    TableBlockInfo tableBlockInfo = new TableBlockInfo("difffilepaths", 6, "5", null, 3, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfo = new TableBlockInfo("difffilepaths", 6, "5", null, 3, ColumnarFormatVersion.V1, null);
     int res = tableBlockInfos.compareTo(tableBlockInfo);
     int expectedResult = 7;
     assertEquals(res, expectedResult);
 
-    TableBlockInfo tableBlockInfo1 = new TableBlockInfo("filepath", 6, "5", null, 3, ColumnarFormatVersion.V1);
+    TableBlockInfo tableBlockInfo1 = new TableBlockInfo("filepath", 6, "5", null, 3, ColumnarFormatVersion.V1, null);
     int res1 = tableBlockInfos.compareTo(tableBlockInfo1);
     int expectedResult1 = 1;
     assertEquals(res1, expectedResult1);
 
     TableBlockInfo tableBlockInfoTest =
-        new TableBlockInfo("filePath", 6, "5", null, 7, new BlockletInfos(6, 2, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("filePath", 6, "5", null, 7, new BlockletInfos(6, 2, 2), ColumnarFormatVersion.V1, null);
     int res2 = tableBlockInfos.compareTo(tableBlockInfoTest);
     int expectedResult2 = -1;
     assertEquals(res2, expectedResult2);
@@ -148,13 +148,13 @@ public class TableBlockInfoTest {
 
   @Test public void compareToTestWithStartBlockletNo() {
     TableBlockInfo tableBlockInfo =
-        new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 3, 2), ColumnarFormatVersion.V1, null);
     int res = tableBlockInfos.compareTo(tableBlockInfo);
     int expectedresult =-1;
     assertEquals(res, expectedresult);
 
     TableBlockInfo tableBlockInfo1 =
-        new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 1, 2), ColumnarFormatVersion.V1);
+        new TableBlockInfo("filepath", 6, "5", null, 6, new BlockletInfos(6, 1, 2), ColumnarFormatVersion.V1, null);
     int res1 = tableBlockInfos.compareTo(tableBlockInfo1);
     int expectedresult1 = 1;
     assertEquals(res1, expectedresult1);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/datastore/block/TableTaskInfoTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/block/TableTaskInfoTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/block/TableTaskInfoTest.java
index 52c56d3..ccc7af6 100644
--- a/core/src/test/java/org/apache/carbondata/core/datastore/block/TableTaskInfoTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/block/TableTaskInfoTest.java
@@ -33,10 +33,10 @@ public class TableTaskInfoTest {
     tableBlockInfoList = new ArrayList<>(5);
 
     String[] locations = { "loc1", "loc2", "loc3" };
-    tableBlockInfoList.add(0, new TableBlockInfo("filePath", 2, "segmentID", locations, 6, ColumnarFormatVersion.V1));
+    tableBlockInfoList.add(0, new TableBlockInfo("filePath", 2, "segmentID", locations, 6, ColumnarFormatVersion.V1, null));
 
     String[] locs = { "loc4", "loc5" };
-    tableBlockInfoList.add(1, new TableBlockInfo("filepath", 2, "segmentId", locs, 6, ColumnarFormatVersion.V1));
+    tableBlockInfoList.add(1, new TableBlockInfo("filepath", 2, "segmentId", locs, 6, ColumnarFormatVersion.V1, null));
 
     tableTaskInfo = new TableTaskInfo("taskId", tableBlockInfoList);
   }
@@ -67,10 +67,10 @@ public class TableTaskInfoTest {
     List<TableBlockInfo> tableBlockInfoListTest = new ArrayList<>();
 
     String[] locations = { "loc1", "loc2", "loc3" };
-    tableBlockInfoListTest.add(0, new TableBlockInfo("filePath", 2, "segmentID", locations, 6, ColumnarFormatVersion.V1));
+    tableBlockInfoListTest.add(0, new TableBlockInfo("filePath", 2, "segmentID", locations, 6, ColumnarFormatVersion.V1, null));
 
     String[] locations1 = { "loc1", "loc2", "loc3" };
-    tableBlockInfoListTest.add(1, new TableBlockInfo("filePath", 2, "segmentID", locations1, 6, ColumnarFormatVersion.V1));
+    tableBlockInfoListTest.add(1, new TableBlockInfo("filePath", 2, "segmentID", locations1, 6, ColumnarFormatVersion.V1, null));
 
     List<String> res = TableTaskInfo.maxNoNodes(tableBlockInfoListTest);
     assert (res.equals(locs));

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
index 9adf4d4..badf63e 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
@@ -516,7 +516,7 @@ public class CarbonUtilTest {
       }
     };
     TableBlockInfo info =
-        new TableBlockInfo("file:/", 1, "0", new String[0], 1, ColumnarFormatVersion.V1);
+        new TableBlockInfo("file:/", 1, "0", new String[0], 1, ColumnarFormatVersion.V1, null);
 
     assertEquals(CarbonUtil.readMetadatFile(info).getVersionId().number(), 1);
   }
@@ -525,7 +525,7 @@ public class CarbonUtilTest {
   public void testToReadMetadatFileWithException()
       throws Exception {
     TableBlockInfo info =
-        new TableBlockInfo("file:/", 1, "0", new String[0], 1, ColumnarFormatVersion.V1);
+        new TableBlockInfo("file:/", 1, "0", new String[0], 1, ColumnarFormatVersion.V1, null);
     CarbonUtil.readMetadatFile(info);
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/core/src/test/java/org/apache/carbondata/core/util/DataFileFooterConverterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/util/DataFileFooterConverterTest.java b/core/src/test/java/org/apache/carbondata/core/util/DataFileFooterConverterTest.java
index 83c7fa4..8161fae 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/DataFileFooterConverterTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/DataFileFooterConverterTest.java
@@ -142,12 +142,14 @@ public class DataFileFooterConverterTest {
       }
     };
     String[] arr = { "a", "b", "c" };
-    TableBlockInfo tableBlockInfo = new TableBlockInfo("/file.carbondata", 3, "id", arr, 3, ColumnarFormatVersion.V1);
+    String fileName = "/part-0-0_batchno0-0-1495074251740.carbondata";
+    TableBlockInfo tableBlockInfo = new TableBlockInfo(fileName, 3, "id", arr, 3, ColumnarFormatVersion.V1, null);
     tableBlockInfo.getBlockletInfos().setNoOfBlockLets(3);
     List<TableBlockInfo> tableBlockInfoList = new ArrayList<>();
     tableBlockInfoList.add(tableBlockInfo);
+    String idxFileName = "0_batchno0-0-1495074251740.carbonindex";
     List<DataFileFooter> dataFileFooterList =
-        dataFileFooterConverter.getIndexInfo("indexfile", tableBlockInfoList);
+        dataFileFooterConverter.getIndexInfo(idxFileName, tableBlockInfoList);
     byte[] exp = dataFileFooterList.get(0).getBlockletIndex().getBtreeIndex().getStartKey();
     byte[] res = "1".getBytes();
     for (int i = 0; i < exp.length; i++) {
@@ -244,7 +246,7 @@ public class DataFileFooterConverterTest {
     segmentInfo.setNumberOfColumns(segmentInfo1.getNum_cols());
     dataFileFooter.setNumberOfRows(3);
     dataFileFooter.setSegmentInfo(segmentInfo);
-    TableBlockInfo info = new TableBlockInfo("/file.carbondata", 1, "0", new String[0], 1, ColumnarFormatVersion.V1);
+    TableBlockInfo info = new TableBlockInfo("/file.carbondata", 1, "0", new String[0], 1, ColumnarFormatVersion.V1, null);
     DataFileFooter result = dataFileFooterConverter.readDataFileFooter(info);
     assertEquals(result.getNumberOfRows(), 3);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputFormat.java
index cda34e4..5d9bbe7 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputFormat.java
@@ -323,10 +323,17 @@ public class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
             updateStatusManager)) {
           continue;
         }
+        String[] deleteDeltaFilePath = null;
+        try {
+          deleteDeltaFilePath =
+              updateStatusManager.getDeleteDeltaFilePath(tableBlockInfo.getFilePath());
+        } catch (Exception e) {
+          throw new IOException(e);
+        }
         result.add(new CarbonInputSplit(segmentNo, new Path(tableBlockInfo.getFilePath()),
             tableBlockInfo.getBlockOffset(), tableBlockInfo.getBlockLength(),
             tableBlockInfo.getLocations(), tableBlockInfo.getBlockletInfos().getNoOfBlockLets(),
-            tableBlockInfo.getVersion()));
+            tableBlockInfo.getVersion(), deleteDeltaFilePath));
       }
     }
     return result;
@@ -429,7 +436,7 @@ public class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
             new TableBlockInfo(carbonInputSplit.getPath().toString(), carbonInputSplit.getStart(),
                 tableSegmentUniqueIdentifier.getSegmentId(), carbonInputSplit.getLocations(),
                 carbonInputSplit.getLength(), blockletInfos, carbonInputSplit.getVersion(),
-                carbonInputSplit.getBlockStorageIdMap()));
+                carbonInputSplit.getBlockStorageIdMap(), carbonInputSplit.getDeleteDeltaFiles()));
       }
     }
     return tableBlockInfoList;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
index 08661a2..631bc2c 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
@@ -72,6 +72,11 @@ public class CarbonInputSplit extends FileSplit
 
   private List<UpdateVO> invalidTimestampsList;
 
+  /**
+   * list of delete delta files for split
+   */
+  private String[] deleteDeltaFiles;
+
   public CarbonInputSplit() {
     segmentId = null;
     taskId = "0";
@@ -82,7 +87,7 @@ public class CarbonInputSplit extends FileSplit
   }
 
   private CarbonInputSplit(String segmentId, Path path, long start, long length, String[] locations,
-      ColumnarFormatVersion version) {
+      ColumnarFormatVersion version, String[] deleteDeltaFiles) {
     super(path, start, length, locations);
     this.segmentId = segmentId;
     String taskNo = CarbonTablePath.DataFileUtil.getTaskNo(path.getName());
@@ -93,11 +98,12 @@ public class CarbonInputSplit extends FileSplit
     this.bucketId = CarbonTablePath.DataFileUtil.getBucketNo(path.getName());
     this.invalidSegments = new ArrayList<>();
     this.version = version;
+    this.deleteDeltaFiles = deleteDeltaFiles;
   }
 
   public CarbonInputSplit(String segmentId, Path path, long start, long length, String[] locations,
-      int numberOfBlocklets, ColumnarFormatVersion version) {
-    this(segmentId, path, start, length, locations, version);
+      int numberOfBlocklets, ColumnarFormatVersion version, String[] deleteDeltaFiles) {
+    this(segmentId, path, start, length, locations, version, deleteDeltaFiles);
     this.numberOfBlocklets = numberOfBlocklets;
   }
 
@@ -113,8 +119,9 @@ public class CarbonInputSplit extends FileSplit
    * @param blockStorageIdMap
    */
   public CarbonInputSplit(String segmentId, Path path, long start, long length, String[] locations,
-      int numberOfBlocklets, ColumnarFormatVersion version, Map<String, String> blockStorageIdMap) {
-    this(segmentId, path, start, length, locations, numberOfBlocklets, version);
+      int numberOfBlocklets, ColumnarFormatVersion version, Map<String, String> blockStorageIdMap,
+      String[] deleteDeltaFiles) {
+    this(segmentId, path, start, length, locations, numberOfBlocklets, version, deleteDeltaFiles);
     this.blockStorageIdMap = blockStorageIdMap;
   }
 
@@ -122,7 +129,7 @@ public class CarbonInputSplit extends FileSplit
       ColumnarFormatVersion version)
       throws IOException {
     return new CarbonInputSplit(segmentId, split.getPath(), split.getStart(), split.getLength(),
-        split.getLocations(), version);
+        split.getLocations(), version, null);
   }
 
   public static List<TableBlockInfo> createBlocks(List<CarbonInputSplit> splitList) {
@@ -133,7 +140,8 @@ public class CarbonInputSplit extends FileSplit
       try {
         tableBlockInfoList.add(
             new TableBlockInfo(split.getPath().toString(), split.getStart(), split.getSegmentId(),
-                split.getLocations(), split.getLength(), blockletInfos, split.getVersion()));
+                split.getLocations(), split.getLength(), blockletInfos, split.getVersion(),
+                split.getDeleteDeltaFiles()));
       } catch (IOException e) {
         throw new RuntimeException("fail to get location of split: " + split, e);
       }
@@ -147,7 +155,7 @@ public class CarbonInputSplit extends FileSplit
     try {
       return new TableBlockInfo(inputSplit.getPath().toString(), inputSplit.getStart(),
           inputSplit.getSegmentId(), inputSplit.getLocations(), inputSplit.getLength(),
-          blockletInfos, inputSplit.getVersion());
+          blockletInfos, inputSplit.getVersion(), inputSplit.getDeleteDeltaFiles());
     } catch (IOException e) {
       throw new RuntimeException("fail to get location of split: " + inputSplit, e);
     }
@@ -167,6 +175,11 @@ public class CarbonInputSplit extends FileSplit
     for (int i = 0; i < numInvalidSegment; i++) {
       invalidSegments.add(in.readUTF());
     }
+    int numberOfDeleteDeltaFiles = in.readInt();
+    deleteDeltaFiles = new String[numberOfDeleteDeltaFiles];
+    for (int i = 0; i < numberOfDeleteDeltaFiles; i++) {
+      deleteDeltaFiles[i] = in.readUTF();
+    }
   }
 
   @Override public void write(DataOutput out) throws IOException {
@@ -178,6 +191,12 @@ public class CarbonInputSplit extends FileSplit
     for (String invalidSegment : invalidSegments) {
       out.writeUTF(invalidSegment);
     }
+    out.writeInt(null != deleteDeltaFiles ? deleteDeltaFiles.length : 0);
+    if (null != deleteDeltaFiles) {
+      for (int i = 0; i < deleteDeltaFiles.length; i++) {
+        out.writeUTF(deleteDeltaFiles[i]);
+      }
+    }
   }
 
   public List<String> getInvalidSegments() {
@@ -287,4 +306,8 @@ public class CarbonInputSplit extends FileSplit
   public Map<String, String> getBlockStorageIdMap() {
     return blockStorageIdMap;
   }
+
+  public String[] getDeleteDeltaFiles() {
+    return deleteDeltaFiles;
+  }
 }
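
Note on the wire format added above: the delete-delta list is serialized as a count followed by one UTF-encoded path per entry, and a null array is written as count 0 and read back as an empty array. A small standalone Scala sketch of the same pattern (the helper names and the sample path are illustrative, not part of CarbonInputSplit):

    import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}

    // Write a possibly-null String[] as <count><utf>*; null becomes count 0.
    def writeStringArray(out: DataOutputStream, values: Array[String]): Unit = {
      out.writeInt(if (values != null) values.length else 0)
      if (values != null) values.foreach(out.writeUTF)
    }

    // Read it back; a count of 0 yields an empty (non-null) array.
    def readStringArray(in: DataInputStream): Array[String] =
      Array.fill(in.readInt())(in.readUTF())

    // Round trip
    val buffer = new ByteArrayOutputStream()
    writeStringArray(new DataOutputStream(buffer), Array("sample.deletedelta"))
    val restored = readStringArray(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray)))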

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/hadoop/src/main/java/org/apache/carbondata/hadoop/internal/index/impl/InMemoryBTreeIndex.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/internal/index/impl/InMemoryBTreeIndex.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/internal/index/impl/InMemoryBTreeIndex.java
index 7ba6133..f9dc178 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/internal/index/impl/InMemoryBTreeIndex.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/internal/index/impl/InMemoryBTreeIndex.java
@@ -90,7 +90,7 @@ class InMemoryBTreeIndex implements Index {
       result.add(new CarbonInputSplit(segment.getId(), new Path(tableBlockInfo.getFilePath()),
           tableBlockInfo.getBlockOffset(), tableBlockInfo.getBlockLength(),
           tableBlockInfo.getLocations(), tableBlockInfo.getBlockletInfos().getNoOfBlockLets(),
-          tableBlockInfo.getVersion()));
+          tableBlockInfo.getVersion(), null));
     }
     return result;
   }
@@ -142,7 +142,8 @@ class InMemoryBTreeIndex implements Index {
       tableBlockInfoList.add(
           new TableBlockInfo(carbonInputSplit.getPath().toString(), carbonInputSplit.getStart(),
               segment.getId(), carbonInputSplit.getLocations(), carbonInputSplit.getLength(),
-              blockletInfos, carbonInputSplit.getVersion()));
+              blockletInfos, carbonInputSplit.getVersion(),
+              carbonInputSplit.getDeleteDeltaFiles()));
     }
     return tableBlockInfoList;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index 4ebbf60..2898870 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -300,7 +300,8 @@ class CarbonMergerRDD[K, V](
       carbonInputSplits ++:= splits.asScala.map(_.asInstanceOf[CarbonInputSplit]).filter(entry => {
         val blockInfo = new TableBlockInfo(entry.getPath.toString,
           entry.getStart, entry.getSegmentId,
-          entry.getLocations, entry.getLength, entry.getVersion
+          entry.getLocations, entry.getLength, entry.getVersion,
+          updateStatusManager.getDeleteDeltaFilePath(entry.getPath.toString)
         )
         !CarbonUtil
           .isInvalidTableBlock(blockInfo, updateDetails, updateStatusManager)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 3d2e35b..dfea7d7 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -564,7 +564,7 @@ object CarbonDataRDDFactory {
             val fileSplit = inputSplit.asInstanceOf[FileSplit]
             new TableBlockInfo(fileSplit.getPath.toString,
               fileSplit.getStart, "1",
-              fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V1
+              fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V1, null
             ).asInstanceOf[Distributable]
           }
           // group blocks to nodes, tasks

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da952e82/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index cab78fe..96a8062 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -577,7 +577,7 @@ object CarbonDataRDDFactory {
             val fileSplit = inputSplit.asInstanceOf[FileSplit]
             new TableBlockInfo(fileSplit.getPath.toString,
               fileSplit.getStart, "1",
-              fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V1
+              fileSplit.getLocations, fileSplit.getLength, ColumnarFormatVersion.V1, null
             ).asInstanceOf[Distributable]
           }
           // group blocks to nodes, tasks


[12/42] carbondata git commit: Added batch sort to load options and added test cases

Posted by ra...@apache.org.
Added batch sort to load options and added test cases

Added sort_scope to load options



Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d734f530
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d734f530
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d734f530

Branch: refs/heads/branch-1.1
Commit: d734f53006308a675af30acefa798c814ada3329
Parents: 211c23b
Author: ravipesala <ra...@gmail.com>
Authored: Thu May 11 23:54:30 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:56:20 2017 +0530

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  10 +-
 .../carbondata/hadoop/CarbonInputSplit.java     |  16 +-
 .../dataload/TestBatchSortDataLoad.scala        | 230 +++++++++++++++++++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  11 +-
 .../execution/command/carbonTableSchema.scala   |   4 +
 .../execution/command/carbonTableSchema.scala   |   4 +
 .../DataLoadFailAllTypeSortTest.scala           |  27 ++-
 .../processing/model/CarbonLoadModel.java       |  30 ++-
 .../newflow/DataLoadProcessBuilder.java         |  12 +-
 .../newflow/sort/SortScopeOptions.java          |  63 +++++
 .../processing/newflow/sort/SorterFactory.java  |   7 +-
 .../newflow/sort/unsafe/UnsafeSortDataRows.java |   5 +-
 .../sortandgroupby/sortdata/SortParameters.java |  13 ++
 .../util/CarbonDataProcessorUtil.java           |  51 ++++
 14 files changed, 449 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
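
In user-facing terms this commit adds two LOAD DATA options, SORT_SCOPE and BATCH_SORT_SIZE_INMB, alongside the carbon.load.sort.scope property. A minimal Scala sketch in the style of the tests added below (it assumes a QueryTest-style sql helper and a CSV at csvPath; the table name and the 32 MB batch size are only examples):

    sql(
      """
        | CREATE TABLE batch_sort_demo(c1 string, c2 int)
        | STORED BY 'org.apache.carbondata.format'
      """.stripMargin)

    // Per-load override: sort each in-memory batch separately; smaller batches
    // load faster but produce more index trees, which can slow down queries.
    sql(s"LOAD DATA LOCAL INPATH '$csvPath' INTO TABLE batch_sort_demo " +
        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='32')")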


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 269a75f..e1f3e9d 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1104,15 +1104,15 @@ public final class CarbonCommonConstants {
   /**
    * Sorts the data in batches and writes the batch data to store with index file.
    */
-  public static final String LOAD_USE_BATCH_SORT = "carbon.load.use.batch.sort";
+  public static final String LOAD_SORT_SCOPE = "carbon.load.sort.scope";
 
   /**
-   * If set to true, the sorting scope is smaller and more index tree will be created,
+   * If set to BATCH_SORT, the sorting scope is smaller and more index trees will be created,
    * thus loading is faster but query maybe slower.
-   * If set to false, the sorting scope is bigger and one index tree per data node will be created,
-   * thus loading is slower but query is faster.
+   * If set to LOCAL_SORT, the sorting scope is bigger and one index tree per data node will be
+   * created, thus loading is slower but query is faster.
    */
-  public static final String LOAD_USE_BATCH_SORT_DEFAULT = "false";
+  public static final String LOAD_SORT_SCOPE_DEFAULT = "LOCAL_SORT";
 
   /**
    * Size of batch data to keep in memory, as a thumb rule it supposed
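
The same choice can also be made cluster-wide rather than per load. A short Scala sketch of driving it through CarbonProperties, mirroring how the tests in this commit toggle it (the 64 MB value is only an example):

    import org.apache.carbondata.core.constants.CarbonCommonConstants
    import org.apache.carbondata.core.util.CarbonProperties

    // Make every subsequent load use batch sort with ~64 MB sort batches ...
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "64")

    // ... and restore the shipped default (LOCAL_SORT) afterwards.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
        CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)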

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
index 0dcaba2..08661a2 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonInputSplit.java
@@ -31,6 +31,7 @@ import org.apache.carbondata.core.datastore.block.Distributable;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
 import org.apache.carbondata.core.metadata.ColumnarFormatVersion;
 import org.apache.carbondata.core.mutate.UpdateVO;
+import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.hadoop.internal.index.Block;
@@ -84,7 +85,11 @@ public class CarbonInputSplit extends FileSplit
       ColumnarFormatVersion version) {
     super(path, start, length, locations);
     this.segmentId = segmentId;
-    this.taskId = CarbonTablePath.DataFileUtil.getTaskNo(path.getName());
+    String taskNo = CarbonTablePath.DataFileUtil.getTaskNo(path.getName());
+    if (taskNo.contains("_")) {
+      taskNo = taskNo.split("_")[0];
+    }
+    this.taskId = taskNo;
     this.bucketId = CarbonTablePath.DataFileUtil.getBucketNo(path.getName());
     this.invalidSegments = new ArrayList<>();
     this.version = version;
@@ -237,10 +242,11 @@ public class CarbonInputSplit extends FileSplit
     String filePath1 = this.getPath().getName();
     String filePath2 = other.getPath().getName();
     if (CarbonTablePath.isCarbonDataFile(filePath1)) {
-      int firstTaskId = Integer.parseInt(CarbonTablePath.DataFileUtil.getTaskNo(filePath1));
-      int otherTaskId = Integer.parseInt(CarbonTablePath.DataFileUtil.getTaskNo(filePath2));
-      if (firstTaskId != otherTaskId) {
-        return firstTaskId - otherTaskId;
+      byte[] firstTaskId = CarbonTablePath.DataFileUtil.getTaskNo(filePath1).getBytes();
+      byte[] otherTaskId = CarbonTablePath.DataFileUtil.getTaskNo(filePath2).getBytes();
+      int compare = ByteUtil.compare(firstTaskId, otherTaskId);
+      if (compare != 0) {
+        return compare;
       }
 
       int firstBucketNo = Integer.parseInt(CarbonTablePath.DataFileUtil.getBucketNo(filePath1));

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
new file mode 100644
index 0000000..70007c6
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import java.io.{BufferedWriter, File, FileWriter, FilenameFilter}
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.apache.spark.sql.Row
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
+  var filePath: String = _
+
+
+  def buildTestData() = {
+    filePath = s"${integrationPath}/spark-common-test/target/big.csv"
+    val file = new File(filePath)
+    val writer = new BufferedWriter(new FileWriter(file))
+    writer.write("c1,c2,c3, c4, c5, c6, c7, c8, c9, c10")
+    writer.newLine()
+    for(i <- 0 until 200000) {
+      writer.write("a" + i%1000 + "," +
+                   "b" + i%1000 + "," +
+                   "c" + i%1000 + "," +
+                   "d" + i%1000 + "," +
+                   "e" + i%1000 + "," +
+                   "f" + i%1000 + "," +
+                   i%1000 + "," +
+                   i%1000 + "," +
+                   i%1000 + "," +
+                   i%1000 + "\n")
+      if ( i % 10000 == 0) {
+        writer.flush()
+      }
+    }
+    writer.close()
+  }
+
+  def dropTable() = {
+    sql("DROP TABLE IF EXISTS carbon_load1")
+    sql("DROP TABLE IF EXISTS carbon_load2")
+    sql("DROP TABLE IF EXISTS carbon_load3")
+    sql("DROP TABLE IF EXISTS carbon_load4")
+    sql("DROP TABLE IF EXISTS carbon_load5")
+    sql("DROP TABLE IF EXISTS carbon_load6")
+  }
+
+
+
+  override def beforeAll {
+    dropTable
+    buildTestData
+  }
+
+
+
+  test("test batch sort load by passing option to load command") {
+
+    sql(
+      """
+        | CREATE TABLE carbon_load1(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+
+    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(200000)))
+
+    assert(getIndexfileCount("carbon_load1") == 12, "Something wrong in batch sort")
+  }
+
+  test("test batch sort load by passing option to load command and compare with normal load") {
+
+    sql(
+      """
+        | CREATE TABLE carbon_load2(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load2 ")
+
+    checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
+      sql("select * from carbon_load2 where c1='a1' order by c1"))
+
+  }
+
+  test("test batch sort load by passing option and compaction") {
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+    sql("alter table carbon_load1 compact 'major'")
+    Thread.sleep(4000)
+    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(800000)))
+
+    assert(getIndexfileCount("carbon_load1", "0.1") == 1, "Something wrong in compaction after batch sort")
+
+  }
+
+  test("test batch sort load by passing option in one load and with out option in other load and then do compaction") {
+
+    sql(
+      """
+        | CREATE TABLE carbon_load5(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 ")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 ")
+
+    checkAnswer(sql("select count(*) from carbon_load5"), Seq(Row(800000)))
+
+    checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
+      sql("select * from carbon_load5 where c1='a1' order by c1"))
+
+    sql("alter table carbon_load5 compact 'major'")
+    Thread.sleep(4000)
+
+    assert(getIndexfileCount("carbon_load5", "0.1") == 1,
+      "Something wrong in compaction after batch sort")
+
+    checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
+      sql("select * from carbon_load5 where c1='a1' order by c1"))
+
+  }
+
+  test("test batch sort load by passing option with single pass") {
+
+    sql(
+      """
+        | CREATE TABLE carbon_load3(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load3 " +
+        s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1', 'single_pass'='true')")
+
+    checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(200000)))
+
+    assert(getIndexfileCount("carbon_load3") == 12, "Something wrong in batch sort")
+
+    checkAnswer(sql("select * from carbon_load3 where c1='a1' order by c1"),
+      sql("select * from carbon_load2 where c1='a1' order by c1"))
+
+  }
+
+  test("test batch sort load by with out passing option but through carbon properties") {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "1")
+    sql(
+      """
+        | CREATE TABLE carbon_load4(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load4 " )
+
+    checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(200000)))
+
+    assert(getIndexfileCount("carbon_load4") == 12, "Something wrong in batch sort")
+    CarbonProperties.getInstance().
+      addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+        CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "0")
+  }
+
+  test("test batch sort load by with out passing option but through carbon properties with default size") {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
+    sql(
+      """
+        | CREATE TABLE carbon_load6(c1 string, c2 string, c3 string, c4 string, c5 string,
+        | c6 string, c7 int, c8 int, c9 int, c10 int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load6 " )
+
+    checkAnswer(sql("select count(*) from carbon_load6"), Seq(Row(200000)))
+
+    assert(getIndexfileCount("carbon_load6") == 1, "Something wrong in batch sort")
+    CarbonProperties.getInstance().
+      addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+        CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
+  }
+
+  def getIndexfileCount(tableName: String, segmentNo: String = "0"): Int = {
+    val store  = storeLocation +"/default/"+ tableName + "/Fact/Part0/Segment_"+segmentNo
+    val list = new File(store).list(new FilenameFilter {
+      override def accept(dir: File, name: String) = name.endsWith(".carbonindex")
+    })
+    list.size
+  }
+
+  override def afterAll {
+    dropTable
+    new File(filePath).delete()
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index afc4a58..a701c72 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -35,6 +35,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.datatype.DataType
 import org.apache.carbondata.core.util.DataTypeUtil
 import org.apache.carbondata.processing.constants.LoggerAction
+import org.apache.carbondata.processing.newflow.sort.SortScopeOptions
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.util.CommonUtil
 
@@ -753,7 +754,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       "COMPLEX_DELIMITER_LEVEL_1", "COMPLEX_DELIMITER_LEVEL_2", "COLUMNDICT",
       "SERIALIZATION_NULL_FORMAT", "BAD_RECORDS_LOGGER_ENABLE", "BAD_RECORDS_ACTION",
       "ALL_DICTIONARY_PATH", "MAXCOLUMNS", "COMMENTCHAR", "DATEFORMAT",
-      "SINGLE_PASS", "IS_EMPTY_DATA_BAD_RECORD"
+      "SINGLE_PASS", "IS_EMPTY_DATA_BAD_RECORD", "SORT_SCOPE", "BATCH_SORT_SIZE_INMB"
     )
     var isSupported = true
     val invalidOptions = StringBuilder.newBuilder
@@ -808,6 +809,14 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       }
     }
 
+    if (options.exists(_._1.equalsIgnoreCase("SORT_SCOPE"))) {
+      val optionValue: String = options.get("sort_scope").get.head._2
+      if (!SortScopeOptions.isValidSortOption(optionValue)) {
+        throw new MalformedCarbonCommandException(
+          "option SORT_SCOPE can have option either BATCH_SORT or LOCAL_SORT or GLOBAL_SORT")
+      }
+    }
+
     // check for duplicate options
     val duplicateOptions = options filter {
       case (_, optionlist) => optionlist.size > 1

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 1192e08..494beff 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -405,6 +405,8 @@ case class LoadTable(
       val dateFormat = options.getOrElse("dateformat", null)
       validateDateFormat(dateFormat, table)
       val maxColumns = options.getOrElse("maxcolumns", null)
+      val sortScope = options.getOrElse("sort_scope", null)
+      val batchSortSizeInMB = options.getOrElse("batch_sort_size_inmb", null)
 
       carbonLoadModel.setEscapeChar(checkDefaultValue(escapeChar, "\\"))
       carbonLoadModel.setQuoteChar(checkDefaultValue(quoteChar, "\""))
@@ -428,6 +430,8 @@ case class LoadTable(
       carbonLoadModel
         .setIsEmptyDataBadRecord(
           DataLoadProcessorConstants.IS_EMPTY_DATA_BAD_RECORD + "," + isEmptyDataBadRecord)
+      carbonLoadModel.setSortScope(sortScope)
+      carbonLoadModel.setBatchSortSizeInMb(batchSortSizeInMB)
       // when single_pass=true, and not use all dict
       val useOnePass = options.getOrElse("single_pass", "false").trim.toLowerCase match {
         case "true" =>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index e2405f2..09824d8 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -417,6 +417,8 @@ case class LoadTable(
       val dateFormat = options.getOrElse("dateformat", null)
       validateDateFormat(dateFormat, table)
       val maxColumns = options.getOrElse("maxcolumns", null)
+      val sortScope = options.getOrElse("sort_scope", null)
+      val batchSortSizeInMB = options.getOrElse("batch_sort_size_inmb", null)
       carbonLoadModel.setEscapeChar(checkDefaultValue(escapeChar, "\\"))
       carbonLoadModel.setQuoteChar(checkDefaultValue(quoteChar, "\""))
       carbonLoadModel.setCommentChar(checkDefaultValue(commentChar, "#"))
@@ -439,6 +441,8 @@ case class LoadTable(
       carbonLoadModel
         .setIsEmptyDataBadRecord(
           DataLoadProcessorConstants.IS_EMPTY_DATA_BAD_RECORD + "," + isEmptyDataBadRecord)
+      carbonLoadModel.setSortScope(sortScope)
+      carbonLoadModel.setBatchSortSizeInMb(batchSortSizeInMB)
       val useOnePass = options.getOrElse("single_pass", "false").trim.toLowerCase match {
         case "true" =>
           true

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/integration/spark2/src/test/scala/org/apache/spark/carbondata/DataLoadFailAllTypeSortTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/DataLoadFailAllTypeSortTest.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/DataLoadFailAllTypeSortTest.scala
index 0465aa7..5e91574 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/DataLoadFailAllTypeSortTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/DataLoadFailAllTypeSortTest.scala
@@ -116,9 +116,9 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "true");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "batch_sort")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL");
+        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
       sql("create table data_bm(name String, dob long, weight int) " +
           "STORED BY 'org.apache.carbondata.format'")
       val testData = s"$resourcesPath/badrecords/dummy.csv"
@@ -132,7 +132,8 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
     }
     finally {
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "false");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+          CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
           CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT);
@@ -148,9 +149,9 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "true");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE");
+        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
       sql("create table data_bmf(name String, dob long, weight int) " +
           "STORED BY 'org.apache.carbondata.format'")
       val testData = s"$resourcesPath/badrecords/dummy.csv"
@@ -166,10 +167,11 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
     }
     finally {
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "false");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+          CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
-          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT);
+          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT)
     }
   }
 
@@ -182,7 +184,7 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "true");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
       sql("create table data_bm_no_good_data(name String, dob long, weight int) " +
           "STORED BY 'org.apache.carbondata.format'")
       val testData = s"$resourcesPath/badrecords/dummy2.csv"
@@ -198,10 +200,11 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
     }
     finally {
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT, "false");
+        .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+          CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
-          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT);
+          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT)
     }
   }
 
@@ -214,7 +217,7 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
       CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL");
+        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
       sql("create table data_tbm(name String, dob long, weight int) " +
           "USING org.apache.spark.sql.CarbonSource OPTIONS('bucketnumber'='4', " +
           "'bucketcolumns'='name', 'tableName'='data_tbm')")
@@ -232,7 +235,7 @@ class DataLoadFailAllTypeSortTest extends QueryTest with BeforeAndAfterAll {
     finally {
       CarbonProperties.getInstance()
         .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
-          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT);
+          CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT)
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/model/CarbonLoadModel.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/model/CarbonLoadModel.java b/processing/src/main/java/org/apache/carbondata/processing/model/CarbonLoadModel.java
index d8f84bf..3a2e2eb 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/model/CarbonLoadModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/model/CarbonLoadModel.java
@@ -171,7 +171,15 @@ public class CarbonLoadModel implements Serializable {
    */
   private boolean preFetch;
 
-  private String numberOfcolumns;
+  /**
+   * Sort scope for this load, for example BATCH_SORT or LOCAL_SORT
+   */
+  private String sortScope;
+
+  /**
+   * Batch sort size in mb.
+   */
+  private String batchSortSizeInMb;
   /**
    * get escape char
    *
@@ -391,6 +399,8 @@ public class CarbonLoadModel implements Serializable {
     copy.dictionaryServerPort = dictionaryServerPort;
     copy.preFetch = preFetch;
     copy.isEmptyDataBadRecord = isEmptyDataBadRecord;
+    copy.sortScope = sortScope;
+    copy.batchSortSizeInMb = batchSortSizeInMb;
     return copy;
   }
 
@@ -442,6 +452,8 @@ public class CarbonLoadModel implements Serializable {
     copyObj.dictionaryServerPort = dictionaryServerPort;
     copyObj.preFetch = preFetch;
     copyObj.isEmptyDataBadRecord = isEmptyDataBadRecord;
+    copyObj.sortScope = sortScope;
+    copyObj.batchSortSizeInMb = batchSortSizeInMb;
     return copyObj;
   }
 
@@ -773,4 +785,20 @@ public class CarbonLoadModel implements Serializable {
   public void setIsEmptyDataBadRecord(String isEmptyDataBadRecord) {
     this.isEmptyDataBadRecord = isEmptyDataBadRecord;
   }
+
+  public String getSortScope() {
+    return sortScope;
+  }
+
+  public void setSortScope(String sortScope) {
+    this.sortScope = sortScope;
+  }
+
+  public String getBatchSortSizeInMb() {
+    return batchSortSizeInMb;
+  }
+
+  public void setBatchSortSizeInMb(String batchSortSizeInMb) {
+    this.batchSortSizeInMb = batchSortSizeInMb;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
index 8865518..5c7c035 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
@@ -35,6 +35,8 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.processing.model.CarbonLoadModel;
 import org.apache.carbondata.processing.newflow.constants.DataLoadProcessorConstants;
+import org.apache.carbondata.processing.newflow.sort.SortScopeOptions;
+import org.apache.carbondata.processing.newflow.steps.CarbonRowDataWriterProcessorStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataConverterProcessorStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataConverterProcessorWithBucketingStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataWriterBatchProcessorStepImpl;
@@ -53,14 +55,12 @@ public final class DataLoadProcessBuilder {
 
   public AbstractDataLoadProcessorStep build(CarbonLoadModel loadModel, String storeLocation,
       CarbonIterator[] inputIterators) throws Exception {
-    boolean batchSort = Boolean.parseBoolean(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT,
-            CarbonCommonConstants.LOAD_USE_BATCH_SORT_DEFAULT));
     CarbonDataLoadConfiguration configuration =
         createConfiguration(loadModel, storeLocation);
+    SortScopeOptions.SortScope sortScope = CarbonDataProcessorUtil.getSortScope(configuration);
     if (configuration.getBucketingInfo() != null) {
       return buildInternalForBucketing(inputIterators, configuration);
-    } else if (batchSort) {
+    } else if (sortScope.equals(SortScopeOptions.SortScope.BATCH_SORT)) {
       return buildInternalForBatchSort(inputIterators, configuration);
     } else {
       return buildInternal(inputIterators, configuration);
@@ -158,6 +158,10 @@ public final class DataLoadProcessBuilder {
         loadModel.getIsEmptyDataBadRecord().split(",")[1]);
     configuration.setDataLoadProperty(DataLoadProcessorConstants.FACT_FILE_PATH,
         loadModel.getFactFilePath());
+    configuration
+        .setDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, loadModel.getSortScope());
+    configuration.setDataLoadProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB,
+        loadModel.getBatchSortSizeInMb());
     CarbonMetadata.getInstance().addCarbonTable(carbonTable);
     List<CarbonDimension> dimensions =
         carbonTable.getDimensionByTableName(carbonTable.getFactTableName());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SortScopeOptions.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SortScopeOptions.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SortScopeOptions.java
new file mode 100644
index 0000000..f2534db
--- /dev/null
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SortScopeOptions.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.processing.newflow.sort;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Sort scope options
+ */
+public class SortScopeOptions {
+
+  public static SortScope getSortScope(String sortScope) {
+    if (sortScope == null) {
+      sortScope = CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT;
+    }
+    switch (sortScope.toUpperCase()) {
+      case "BATCH_SORT":
+        return SortScope.BATCH_SORT;
+      case "LOCAL_SORT":
+        return SortScope.LOCAL_SORT;
+      case "NO_SORT":
+        return SortScope.NO_SORT;
+      default:
+        return SortScope.LOCAL_SORT;
+    }
+  }
+
+  public static boolean isValidSortOption(String sortScope) {
+    if (sortScope == null) {
+      return false;
+    }
+    switch (sortScope.toUpperCase()) {
+      case "BATCH_SORT":
+        return true;
+      case "LOCAL_SORT":
+        return true;
+      case "NO_SORT":
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  public enum SortScope {
+    NO_SORT, BATCH_SORT, LOCAL_SORT;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SorterFactory.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SorterFactory.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SorterFactory.java
index 60cca69..39a21ad 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SorterFactory.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/SorterFactory.java
@@ -29,6 +29,7 @@ import org.apache.carbondata.processing.newflow.sort.impl.ParallelReadMergeSorte
 import org.apache.carbondata.processing.newflow.sort.impl.UnsafeBatchParallelReadMergeSorterImpl;
 import org.apache.carbondata.processing.newflow.sort.impl.UnsafeParallelReadMergeSorterImpl;
 import org.apache.carbondata.processing.newflow.sort.impl.UnsafeParallelReadMergeSorterWithBucketingImpl;
+import org.apache.carbondata.processing.util.CarbonDataProcessorUtil;
 
 public class SorterFactory {
 
@@ -39,9 +40,7 @@ public class SorterFactory {
     boolean offheapsort = Boolean.parseBoolean(CarbonProperties.getInstance()
         .getProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT,
             CarbonCommonConstants.ENABLE_UNSAFE_SORT_DEFAULT));
-    boolean batchSort = Boolean.parseBoolean(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.LOAD_USE_BATCH_SORT,
-            CarbonCommonConstants.LOAD_USE_BATCH_SORT_DEFAULT));
+    SortScopeOptions.SortScope sortScope = CarbonDataProcessorUtil.getSortScope(configuration);
     Sorter sorter;
     if (offheapsort) {
       if (configuration.getBucketingInfo() != null) {
@@ -58,7 +57,7 @@ public class SorterFactory {
         sorter = new ParallelReadMergeSorterImpl(counter);
       }
     }
-    if (batchSort) {
+    if (sortScope.equals(SortScopeOptions.SortScope.BATCH_SORT)) {
       if (configuration.getBucketingInfo() == null) {
         sorter = new UnsafeBatchParallelReadMergeSorterImpl(counter);
       } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
index df3825a..898b73d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
@@ -98,9 +98,10 @@ public class UnsafeSortDataRows {
         .getProperty(CarbonCommonConstants.ENABLE_INMEMORY_MERGE_SORT,
             CarbonCommonConstants.ENABLE_INMEMORY_MERGE_SORT_DEFAULT));
 
-    this.maxSizeAllowed = Integer.parseInt(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "0"));
+    this.maxSizeAllowed = parameters.getBatchSortSizeinMb();
     if (maxSizeAllowed <= 0) {
+      // If the user does not specify a batch size, fall back to half of the usable
+      // memory configured for the unsafe sort memory manager.
       this.maxSizeAllowed = UnsafeMemoryManager.INSTANCE.getUsableMemory() / 2;
     } else {
       this.maxSizeAllowed = this.maxSizeAllowed * 1024 * 1024;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/sortandgroupby/sortdata/SortParameters.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sortandgroupby/sortdata/SortParameters.java b/processing/src/main/java/org/apache/carbondata/processing/sortandgroupby/sortdata/SortParameters.java
index 3c3a9d8..07149f7 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sortandgroupby/sortdata/SortParameters.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sortandgroupby/sortdata/SortParameters.java
@@ -114,6 +114,8 @@ public class SortParameters {
 
   private int numberOfCores;
 
+  private int batchSortSizeinMb;
+
   public SortParameters getCopy() {
     SortParameters parameters = new SortParameters();
     parameters.tempFileLocation = tempFileLocation;
@@ -138,6 +140,7 @@ public class SortParameters {
     parameters.taskNo = taskNo;
     parameters.noDictionaryDimnesionColumn = noDictionaryDimnesionColumn;
     parameters.numberOfCores = numberOfCores;
+    parameters.batchSortSizeinMb = batchSortSizeinMb;
     return parameters;
   }
 
@@ -317,6 +320,14 @@ public class SortParameters {
     this.numberOfCores = numberOfCores;
   }
 
+  public int getBatchSortSizeinMb() {
+    return batchSortSizeinMb;
+  }
+
+  public void setBatchSortSizeinMb(int batchSortSizeinMb) {
+    this.batchSortSizeinMb = batchSortSizeinMb;
+  }
+
   public static SortParameters createSortParameters(CarbonDataLoadConfiguration configuration) {
     SortParameters parameters = new SortParameters();
     CarbonTableIdentifier tableIdentifier =
@@ -334,6 +345,8 @@ public class SortParameters {
     parameters.setComplexDimColCount(configuration.getComplexDimensionCount());
     parameters.setNoDictionaryDimnesionColumn(
         CarbonDataProcessorUtil.getNoDictionaryMapping(configuration.getDataFields()));
+    parameters.setBatchSortSizeinMb(CarbonDataProcessorUtil.getBatchSortSizeinMb(configuration));
+
     parameters.setObserver(new SortObserver());
     // get sort buffer size
     parameters.setSortBufferSize(Integer.parseInt(carbonProperties

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d734f530/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
index 41bfbed..a4de24e 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
@@ -56,8 +56,10 @@ import org.apache.carbondata.processing.datatypes.GenericDataType;
 import org.apache.carbondata.processing.datatypes.PrimitiveDataType;
 import org.apache.carbondata.processing.datatypes.StructDataType;
 import org.apache.carbondata.processing.model.CarbonDataLoadSchema;
+import org.apache.carbondata.processing.newflow.CarbonDataLoadConfiguration;
 import org.apache.carbondata.processing.newflow.DataField;
 import org.apache.carbondata.processing.newflow.row.CarbonRow;
+import org.apache.carbondata.processing.newflow.sort.SortScopeOptions;
 
 import org.apache.commons.lang3.ArrayUtils;
 
@@ -522,4 +524,53 @@ public final class CarbonDataProcessorUtil {
     return aggType;
   }
 
+  /**
+   * Get the sort scope (NO_SORT, BATCH_SORT or LOCAL_SORT) to use for this data load.
+   * @param configuration
+   * @return
+   */
+  public static SortScopeOptions.SortScope getSortScope(CarbonDataLoadConfiguration configuration) {
+    SortScopeOptions.SortScope sortScope;
+    try {
+      // first check whether the user set it in the DDL, otherwise read carbon properties
+      if (configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE) == null) {
+        sortScope = SortScopeOptions.getSortScope(CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
+                CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT));
+      } else {
+        sortScope = SortScopeOptions.getSortScope(
+            configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE)
+                .toString());
+      }
+    } catch (Exception e) {
+      sortScope = SortScopeOptions.getSortScope(CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
+      LOGGER.warn("sort scope is set to " + sortScope);
+    }
+    return sortScope;
+  }
+
+  /**
+   * Get the batch sort size
+   * @param configuration
+   * @return
+   */
+  public static int getBatchSortSizeinMb(CarbonDataLoadConfiguration configuration) {
+    int batchSortSizeInMb;
+    try {
+      // First try the value given in the DDL, otherwise read it from carbon properties.
+      if (configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB)
+          == null) {
+        batchSortSizeInMb = Integer.parseInt(CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "0"));
+      } else {
+        batchSortSizeInMb = Integer.parseInt(
+            configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB)
+                .toString());
+      }
+    } catch (Exception e) {
+      batchSortSizeInMb = 0;
+    }
+    return batchSortSizeInMb;
+  }
+
 }
\ No newline at end of file
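
For context, the properties read above map to per-load settings rather than only the global carbon.properties values. A minimal usage sketch in the style of the project's Scala tests follows; the table name, path and session setup are placeholders, and the exact OPTIONS spelling is an assumption based on the LOAD_SORT_SCOPE / LOAD_BATCH_SORT_SIZE_INMB constants, not taken verbatim from this patch.

import org.apache.spark.sql.SparkSession

object SortScopeLoadSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder session; a real CarbonData job would go through CarbonSession/CarbonContext.
    val spark = SparkSession.builder().appName("sort-scope-demo").getOrCreate()
    val csvPath = "hdfs:///tmp/sample.csv" // placeholder input
    // Assumed option names; choose the sort scope and batch sort size for this load only.
    spark.sql(
      s"""
         |LOAD DATA INPATH '$csvPath' INTO TABLE default.sort_scope_demo
         |OPTIONS('SORT_SCOPE'='BATCH_SORT', 'BATCH_SORT_SIZE_INMB'='64')
       """.stripMargin)
  }
}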


[31/42] carbondata git commit: Problem: Executor lost failure in case of data load failure due to bad records

Posted by ra...@apache.org.
Problem: Executor lost failure in case of data load failure due to bad records

Analysis: When data loads containing bad records are run repeatedly, the executor is eventually lost to an OOM error and the application is later restarted by YARN. The reason is that, when a load fails because of bad records, the executor throws an exception and the task keeps retrying until the maximum number of retry attempts is reached; this cycle continues until YARN restarts the application.

Fix: When the data load failure is caused by bad records, i.e. it is an intentional failure raised by Carbon, the executor should not retry the load. Instead the task should complete gracefully and the failure information should be returned to, and handled by, the driver.
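
As a minimal, self-contained sketch of that control flow (these are toy stand-ins, not the real Carbon classes, which appear in the diffs below):

object BadRecordFailFlowSketch {
  object FailureCauses extends Enumeration { val NONE, BAD_RECORDS = Value }
  final case class ExecutionErrors(var failureCauses: FailureCauses.Value, var errorMsg: String)

  val PARTIAL_SUCCESS = "Partial Success"
  val SUCCESS = "Success"

  // Executor side: instead of rethrowing (which made the task retry until the maximum
  // number of attempts), record the cause and let the task finish normally.
  def runTask(load: () => Unit): (String, ExecutionErrors) = {
    val errors = ExecutionErrors(FailureCauses.NONE, "")
    try { load(); (SUCCESS, errors) }
    catch {
      case e: IllegalStateException => // stands in for BadRecordFoundException
        errors.failureCauses = FailureCauses.BAD_RECORDS
        errors.errorMsg = e.getMessage
        (PARTIAL_SUCCESS, errors)
    }
  }

  // Driver side: a partial success caused by bad records plus a FAIL action cleans up
  // the segment (elided here) and fails the load exactly once, with no task retries.
  def finishLoad(status: (String, ExecutionErrors), badRecordsAction: String): Unit = {
    if (status._1 == PARTIAL_SUCCESS &&
        status._2.failureCauses == FailureCauses.BAD_RECORDS &&
        badRecordsAction.equalsIgnoreCase("FAIL")) {
      throw new RuntimeException(status._2.errorMsg)
    }
  }

  def main(args: Array[String]): Unit = {
    val status = runTask(() => throw new IllegalStateException("bad record in column c1"))
    finishLoad(status, "FAIL")
  }
}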


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/105b7c34
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/105b7c34
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/105b7c34

Branch: refs/heads/branch-1.1
Commit: 105b7c3496db620390b804b87ac5eb5835b04176
Parents: 357ab63
Author: manishgupta88 <to...@gmail.com>
Authored: Tue Jun 6 12:18:35 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:25:30 2017 +0530

----------------------------------------------------------------------
 .../org/apache/carbondata/spark/KeyVal.scala    |  8 ++-
 .../spark/rdd/NewCarbonDataLoadRDD.scala        | 13 +++-
 .../carbondata/spark/rdd/UpdateDataLoad.scala   |  5 --
 .../spark/rdd/CarbonDataRDDFactory.scala        | 54 ++++++++++++-----
 .../spark/rdd/CarbonDataRDDFactory.scala        | 63 ++++++++++++++------
 .../converter/impl/RowConverterImpl.java        |  3 +-
 .../exception/BadRecordFoundException.java      |  2 +-
 .../newflow/sort/impl/ThreadStatusObserver.java | 19 +++++-
 8 files changed, 120 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/integration/spark-common/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/KeyVal.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
index ab5fc0b..31dd4e6 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
@@ -48,11 +48,13 @@ class RawValueImpl extends RawValue[Array[Any]] {
 }
 
 trait DataLoadResult[K, V] extends Serializable {
-  def getKey(key: String, value: LoadMetadataDetails): (K, V)
+  def getKey(key: String, value: (LoadMetadataDetails, ExecutionErrors)): (K, V)
 }
 
-class DataLoadResultImpl extends DataLoadResult[String, LoadMetadataDetails] {
-  override def getKey(key: String, value: LoadMetadataDetails): (String, LoadMetadataDetails) = {
+class DataLoadResultImpl extends DataLoadResult[String, (LoadMetadataDetails, ExecutionErrors)] {
+  override def getKey(key: String,
+      value: (LoadMetadataDetails, ExecutionErrors)): (String, (LoadMetadataDetails,
+    ExecutionErrors)) = {
     (key, value)
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index a6d231d..6b30ed7 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -34,6 +34,7 @@ import org.apache.spark.{Partition, SerializableWritable, SparkContext, SparkEnv
 import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionWrap, RDD}
 import org.apache.spark.serializer.SerializerInstance
 import org.apache.spark.sql.Row
+import org.apache.spark.sql.execution.command.ExecutionErrors
 import org.apache.spark.util.SparkUtil
 
 import org.apache.carbondata.common.CarbonIterator
@@ -49,7 +50,7 @@ import org.apache.carbondata.processing.model.CarbonLoadModel
 import org.apache.carbondata.processing.newflow.DataLoadExecutor
 import org.apache.carbondata.processing.newflow.exception.BadRecordFoundException
 import org.apache.carbondata.spark.DataLoadResult
-import org.apache.carbondata.spark.load.CarbonLoaderUtil
+import org.apache.carbondata.spark.load.{CarbonLoaderUtil, FailureCauses}
 import org.apache.carbondata.spark.splits.TableSplit
 import org.apache.carbondata.spark.util.{CarbonQueryUtil, CarbonScalaUtil, CommonUtil}
 
@@ -219,6 +220,7 @@ class NewCarbonDataLoadRDD[K, V](
     val iter = new Iterator[(K, V)] {
       var partitionID = "0"
       val loadMetadataDetails = new LoadMetadataDetails()
+      val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
       var model: CarbonLoadModel = _
       val uniqueLoadStatusId =
         carbonLoadModel.getTableName + CarbonCommonConstants.UNDERSCORE + theSplit.index
@@ -244,6 +246,8 @@ class NewCarbonDataLoadRDD[K, V](
       } catch {
         case e: BadRecordFoundException =>
           loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
+          executionErrors.failureCauses = FailureCauses.BAD_RECORDS
+          executionErrors.errorMsg = e.getMessage
           logInfo("Bad Record Found")
         case e: Exception =>
           loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)
@@ -348,7 +352,7 @@ class NewCarbonDataLoadRDD[K, V](
 
       override def next(): (K, V) = {
         finished = true
-        result.getKey(uniqueLoadStatusId, loadMetadataDetails)
+        result.getKey(uniqueLoadStatusId, (loadMetadataDetails, executionErrors))
       }
     }
     iter
@@ -394,6 +398,7 @@ class NewDataFrameLoaderRDD[K, V](
     val iter = new Iterator[(K, V)] {
       val partitionID = "0"
       val loadMetadataDetails = new LoadMetadataDetails()
+      val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
       val model: CarbonLoadModel = carbonLoadModel
       val uniqueLoadStatusId =
         carbonLoadModel.getTableName + CarbonCommonConstants.UNDERSCORE + theSplit.index
@@ -430,6 +435,8 @@ class NewDataFrameLoaderRDD[K, V](
       } catch {
         case e: BadRecordFoundException =>
           loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
+          executionErrors.failureCauses = FailureCauses.BAD_RECORDS
+          executionErrors.errorMsg = e.getMessage
           logInfo("Bad Record Found")
         case e: Exception =>
           loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)
@@ -453,7 +460,7 @@ class NewDataFrameLoaderRDD[K, V](
 
       override def next(): (K, V) = {
         finished = true
-        result.getKey(uniqueLoadStatusId, loadMetadataDetails)
+        result.getKey(uniqueLoadStatusId, (loadMetadataDetails, executionErrors))
       }
     }
     iter

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/UpdateDataLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/UpdateDataLoad.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/UpdateDataLoad.scala
index a36fb63..bcfc096 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/UpdateDataLoad.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/UpdateDataLoad.scala
@@ -28,7 +28,6 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
 import org.apache.carbondata.processing.model.CarbonLoadModel
 import org.apache.carbondata.processing.newflow.DataLoadExecutor
-import org.apache.carbondata.processing.newflow.exception.BadRecordFoundException
 
 /**
  * Data load in case of update command .
@@ -61,10 +60,6 @@ object UpdateDataLoad {
         recordReaders.toArray)
 
     } catch {
-      case e: BadRecordFoundException =>
-        loadMetadataDetails.setLoadStatus(
-          CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
-        LOGGER.info("Bad Record Found")
       case e: Exception =>
         LOGGER.error(e)
         throw e

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 2922365..3d2e35b 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -49,12 +49,14 @@ import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonStorePath
 import org.apache.carbondata.processing.csvload.BlockDetails
+import org.apache.carbondata.processing.constants.LoggerAction
+import org.apache.carbondata.processing.csvload.{BlockDetails, CSVInputFormat, StringArrayWritable}
 import org.apache.carbondata.processing.etl.DataLoadingException
 import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
 import org.apache.carbondata.processing.model.CarbonLoadModel
-import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingException
+import org.apache.carbondata.processing.newflow.exception.{BadRecordFoundException, CarbonDataLoadingException}
 import org.apache.carbondata.spark._
-import org.apache.carbondata.spark.load._
+import org.apache.carbondata.spark.load.{FailureCauses, _}
 import org.apache.carbondata.spark.splits.TableSplit
 import org.apache.carbondata.spark.util.{CarbonQueryUtil, CommonUtil}
 
@@ -487,7 +489,7 @@ object CarbonDataRDDFactory {
       // CarbonCommonConstants.TABLE_SPLIT_PARTITION_DEFAULT_VALUE).toBoolean
       val isTableSplitPartition = false
       var blocksGroupBy: Array[(String, Array[BlockDetails])] = null
-      var status: Array[(String, LoadMetadataDetails)] = null
+      var status: Array[(String, (LoadMetadataDetails, ExecutionErrors))] = null
       var res: Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]] = null
 
       def loadDataFile(): Unit = {
@@ -688,6 +690,12 @@ object CarbonDataRDDFactory {
                 carbonLoadModel,
                 loadMetadataDetails)
             } catch {
+              case e: BadRecordFoundException =>
+                loadMetadataDetails
+                  .setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
+                executionErrors.failureCauses = FailureCauses.BAD_RECORDS
+                executionErrors.errorMsg = e.getMessage
+                LOGGER.info("Bad Record Found")
               case e: Exception =>
                 LOGGER.info("DataLoad failure")
                 LOGGER.error(e)
@@ -744,8 +752,7 @@ object CarbonDataRDDFactory {
           loadDataFrameForUpdate()
         } else if (dataFrame.isDefined) {
           loadDataFrame()
-        }
-        else {
+        } else {
           loadDataFile()
         }
         if (updateModel.isDefined) {
@@ -762,25 +769,30 @@ object CarbonDataRDDFactory {
                 else {
                   updateModel.get.executorErrors = resultOfBlock._2._2
                 }
+              } else if (resultOfBlock._2._1.getLoadStatus
+                .equalsIgnoreCase(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)) {
+                loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
+                updateModel.get.executorErrors.failureCauses = resultOfBlock._2._2.failureCauses
+                updateModel.get.executorErrors.errorMsg = resultOfBlock._2._2.errorMsg
               }
             }
           ))
 
         }
         else {
-        val newStatusMap = scala.collection.mutable.Map.empty[String, String]
+          val newStatusMap = scala.collection.mutable.Map.empty[String, String]
         if (status.nonEmpty) {
           status.foreach { eachLoadStatus =>
             val state = newStatusMap.get(eachLoadStatus._1)
             state match {
               case Some(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
+                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
               case Some(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
-                if eachLoadStatus._2.getLoadStatus ==
+                if eachLoadStatus._2._1.getLoadStatus ==
                     CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
+                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
               case _ =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
+                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
             }
           }
 
@@ -833,8 +845,11 @@ object CarbonDataRDDFactory {
             }
           }
           return
-        }
-        else {
+        } else if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
+                   updateModel.get.executorErrors.failureCauses == FailureCauses.BAD_RECORDS &&
+                   carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
+          return
+        } else {
           // in success case handle updation of the table status file.
           // success case.
           val segmentDetails = new util.HashSet[String]()
@@ -883,7 +898,7 @@ object CarbonDataRDDFactory {
 
         return
       }
-        LOGGER.info("********starting clean up**********")
+      LOGGER.info("********starting clean up**********")
       if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
         CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
         LOGGER.info("********clean up done**********")
@@ -892,7 +907,18 @@ object CarbonDataRDDFactory {
         LOGGER.warn("Cannot write load metadata file as data load failed")
         throw new Exception(errorMessage)
       } else {
-        val metadataDetails = status(0)._2
+        // check if data load fails due to bad record and throw data load failure due to
+        // bad record exception
+        if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
+            status(0)._2._2.failureCauses == FailureCauses.BAD_RECORDS &&
+            carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
+          CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
+          LOGGER.info("********clean up done**********")
+          LOGGER.audit(s"Data load is failed for " +
+                       s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
+          throw new Exception(status(0)._2._2.errorMsg)
+        }
+        val metadataDetails = status(0)._2._1
         if (!isAgg) {
             writeDictionary(carbonLoadModel, result, false)
             val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index b4720a9..cab78fe 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -48,13 +48,14 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
 import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties}
 import org.apache.carbondata.core.util.path.CarbonStorePath
+import org.apache.carbondata.processing.constants.LoggerAction
 import org.apache.carbondata.processing.csvload.{BlockDetails, CSVInputFormat, StringArrayWritable}
 import org.apache.carbondata.processing.etl.DataLoadingException
 import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
 import org.apache.carbondata.processing.model.CarbonLoadModel
-import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingException
+import org.apache.carbondata.processing.newflow.exception.{BadRecordFoundException, CarbonDataLoadingException}
 import org.apache.carbondata.spark._
-import org.apache.carbondata.spark.load._
+import org.apache.carbondata.spark.load.{FailureCauses, _}
 import org.apache.carbondata.spark.splits.TableSplit
 import org.apache.carbondata.spark.util.{CarbonQueryUtil, CarbonScalaUtil, CommonUtil}
 
@@ -501,7 +502,7 @@ object CarbonDataRDDFactory {
       // CarbonCommonConstants.TABLE_SPLIT_PARTITION_DEFAULT_VALUE).toBoolean
       val isTableSplitPartition = false
       var blocksGroupBy: Array[(String, Array[BlockDetails])] = null
-      var status: Array[(String, LoadMetadataDetails)] = null
+      var status: Array[(String, (LoadMetadataDetails, ExecutionErrors))] = null
       var res: Array[List[(String, (LoadMetadataDetails, ExecutionErrors))]] = null
 
       def loadDataFile(): Unit = {
@@ -701,6 +702,12 @@ object CarbonDataRDDFactory {
                 carbonLoadModel,
                 loadMetadataDetails)
             } catch {
+              case e: BadRecordFoundException =>
+                loadMetadataDetails
+                  .setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
+                executionErrors.failureCauses = FailureCauses.BAD_RECORDS
+                executionErrors.errorMsg = e.getMessage
+                LOGGER.info("Bad Record Found")
               case e: Exception =>
                 LOGGER.info("DataLoad failure")
                 LOGGER.error(e)
@@ -791,6 +798,11 @@ object CarbonDataRDDFactory {
                 else {
                   updateModel.get.executorErrors = resultOfBlock._2._2
                 }
+              } else if (resultOfBlock._2._1.getLoadStatus
+                .equalsIgnoreCase(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)) {
+                loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
+                updateModel.get.executorErrors.failureCauses = resultOfBlock._2._2.failureCauses
+                updateModel.get.executorErrors.errorMsg = resultOfBlock._2._2.errorMsg
               }
             }
           ))
@@ -798,20 +810,20 @@ object CarbonDataRDDFactory {
         }
         else {
         val newStatusMap = scala.collection.mutable.Map.empty[String, String]
-        if (status.nonEmpty) {
-          status.foreach { eachLoadStatus =>
-            val state = newStatusMap.get(eachLoadStatus._1)
-            state match {
-              case Some(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
-              case Some(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
-                if eachLoadStatus._2.getLoadStatus ==
-                    CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
-              case _ =>
-                newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
+          if (status.nonEmpty) {
+            status.foreach { eachLoadStatus =>
+              val state = newStatusMap.get(eachLoadStatus._1)
+              state match {
+                case Some(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) =>
+                  newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
+                case Some(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
+                  if eachLoadStatus._2._1.getLoadStatus ==
+                     CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS =>
+                  newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
+                case _ =>
+                  newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2._1.getLoadStatus)
+              }
             }
-          }
 
           newStatusMap.foreach {
             case (key, value) =>
@@ -864,6 +876,10 @@ object CarbonDataRDDFactory {
             }
           }
           return
+        } else if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
+                   updateModel.get.executorErrors.failureCauses == FailureCauses.BAD_RECORDS &&
+                   carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
+          return
         } else {
           // in success case handle updation of the table status file.
           // success case.
@@ -913,7 +929,7 @@ object CarbonDataRDDFactory {
 
         return
       }
-        LOGGER.info("********starting clean up**********")
+      LOGGER.info("********starting clean up**********")
       if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
         CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
         LOGGER.info("********clean up done**********")
@@ -922,6 +938,17 @@ object CarbonDataRDDFactory {
         LOGGER.warn("Cannot write load metadata file as data load failed")
         throw new Exception(errorMessage)
       } else {
+        // check if data load fails due to bad record and throw data load failure due to
+        // bad record exception
+        if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
+            status(0)._2._2.failureCauses == FailureCauses.BAD_RECORDS &&
+            carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
+          CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
+          LOGGER.info("********clean up done**********")
+          LOGGER.audit(s"Data load is failed for " +
+                       s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
+          throw new Exception(status(0)._2._2.errorMsg)
+        }
         // if segment is empty then fail the data load
         if (!CarbonLoaderUtil.isValidSegment(carbonLoadModel, currentLoadCount)) {
           CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
@@ -932,7 +959,7 @@ object CarbonDataRDDFactory {
           LOGGER.warn("Cannot write load metadata file as data load failed")
           throw new Exception("No Data to load")
         }
-        val metadataDetails = status(0)._2
+        val metadataDetails = status(0)._2._1
         if (!isAgg) {
           writeDictionary(carbonLoadModel, result, false)
           val status = CarbonLoaderUtil.recordLoadMetadata(currentLoadCount, metadataDetails,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/processing/src/main/java/org/apache/carbondata/processing/newflow/converter/impl/RowConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/converter/impl/RowConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/converter/impl/RowConverterImpl.java
index 5a476da..90d0ea5 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/converter/impl/RowConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/converter/impl/RowConverterImpl.java
@@ -42,6 +42,7 @@ import org.apache.carbondata.processing.newflow.constants.DataLoadProcessorConst
 import org.apache.carbondata.processing.newflow.converter.BadRecordLogHolder;
 import org.apache.carbondata.processing.newflow.converter.FieldConverter;
 import org.apache.carbondata.processing.newflow.converter.RowConverter;
+import org.apache.carbondata.processing.newflow.exception.BadRecordFoundException;
 import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingException;
 import org.apache.carbondata.processing.newflow.row.CarbonRow;
 import org.apache.carbondata.processing.surrogatekeysgenerator.csvbased.BadRecordsLogger;
@@ -156,7 +157,7 @@ public class RowConverterImpl implements RowConverter {
       if (!logHolder.isLogged() && logHolder.isBadRecordNotAdded()) {
         if (badRecordLogger.isDataLoadFail()) {
           String error = "Data load failed due to bad record: " + logHolder.getReason();
-          throw new CarbonDataLoadingException(error);
+          throw new BadRecordFoundException(error);
         }
         badRecordLogger.addBadRecordsToBuilder(copy.getData(), logHolder.getReason());
         logHolder.clear();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/processing/src/main/java/org/apache/carbondata/processing/newflow/exception/BadRecordFoundException.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/exception/BadRecordFoundException.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/exception/BadRecordFoundException.java
index 840f28c..eb95528 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/exception/BadRecordFoundException.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/exception/BadRecordFoundException.java
@@ -16,7 +16,7 @@
  */
 package org.apache.carbondata.processing.newflow.exception;
 
-public class BadRecordFoundException extends Exception {
+public class BadRecordFoundException extends CarbonDataLoadingException {
   /**
    * default serial version ID.
    */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/105b7c34/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/ThreadStatusObserver.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/ThreadStatusObserver.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/ThreadStatusObserver.java
index d901ba4..56a32a3 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/ThreadStatusObserver.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/ThreadStatusObserver.java
@@ -21,6 +21,11 @@ import java.util.concurrent.ExecutorService;
 
 public class ThreadStatusObserver {
 
+  /**
+   * lock object
+   */
+  private Object lock = new Object();
+
   private ExecutorService executorService;
 
   private Throwable throwable;
@@ -30,8 +35,18 @@ public class ThreadStatusObserver {
   }
 
   public void notifyFailed(Throwable throwable) {
-    executorService.shutdownNow();
-    this.throwable = throwable;
+    // Only the first failing thread should shut down the executor service and assign the
+    // throwable; otherwise the original cause of the failure could be overwritten, because
+    // shutdownNow makes the other running threads throw InterruptedException and each of
+    // them would then try to overwrite the throwable object.
+    if (null == this.throwable) {
+      synchronized (lock) {
+        if (null == this.throwable) {
+          executorService.shutdownNow();
+          this.throwable = throwable;
+        }
+      }
+    }
   }
 
   public Throwable getThrowable() {


[24/42] carbondata git commit: MultiClient Load is failing

Posted by ra...@apache.org.
MultiClient Load is failing
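
Judging from the diff below, the load path resolved the table through the CarbonMetadata singleton (keyed by dbName_tableName), a lookup that is not guaranteed to succeed when several clients load concurrently; the fix threads the CarbonTable the caller has already resolved from the relation through to CarbonLoaderUtil instead, e.g.:

  // after this patch, as called from CarbonDataRDDFactory
  CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
    currentLoadCount.toString, carbonTable)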


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ef583afe
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ef583afe
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ef583afe

Branch: refs/heads/branch-1.1
Commit: ef583afe6968d1222553810bdc2251cef16f016c
Parents: 735e477
Author: nareshpr <pr...@gmail.com>
Authored: Tue May 30 14:48:10 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:12:41 2017 +0530

----------------------------------------------------------------------
 .../org/apache/carbondata/spark/load/CarbonLoaderUtil.java  | 9 ++-------
 .../apache/carbondata/spark/rdd/DataManagementFunc.scala    | 4 ++--
 .../apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala  | 2 +-
 .../apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala  | 2 +-
 .../spark/sql/execution/command/carbonTableSchema.scala     | 6 +++---
 .../scala/org/apache/spark/sql/hive/CarbonMetastore.scala   | 2 --
 6 files changed, 9 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java b/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
index a4f15d2..54e12f3 100644
--- a/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
+++ b/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
@@ -58,7 +58,6 @@ import org.apache.carbondata.core.fileoperations.AtomicFileOperationsImpl;
 import org.apache.carbondata.core.fileoperations.FileWriteOperation;
 import org.apache.carbondata.core.locks.ICarbonLock;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
-import org.apache.carbondata.core.metadata.CarbonMetadata;
 import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
 import org.apache.carbondata.core.metadata.ColumnIdentifier;
 import org.apache.carbondata.core.metadata.datatype.DataType;
@@ -783,14 +782,10 @@ public final class CarbonLoaderUtil {
    * This method will get the store location for the given path, segment id and partition id
    *
    * @param carbonStorePath
-   * @param dbName
-   * @param tableName
    * @param segmentId
    */
-  public static void checkAndCreateCarbonDataLocation(String carbonStorePath, String dbName,
-      String tableName, String segmentId) {
-    CarbonTable carbonTable = CarbonMetadata.getInstance()
-        .getCarbonTable(dbName + CarbonCommonConstants.UNDERSCORE + tableName);
+  public static void checkAndCreateCarbonDataLocation(String carbonStorePath,
+      String segmentId, CarbonTable carbonTable) {
     CarbonTableIdentifier carbonTableIdentifier = carbonTable.getCarbonTableIdentifier();
     CarbonTablePath carbonTablePath =
         CarbonStorePath.getCarbonTablePath(carbonStorePath, carbonTableIdentifier);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
index 8039d24..1790ea2 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.execution.command.{CompactionCallableModel, Compacti
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.locks.{CarbonLockFactory, CarbonLockUtil, LockUsage}
-import org.apache.carbondata.core.metadata.{CarbonMetadata, CarbonTableIdentifier}
+import org.apache.carbondata.core.metadata.CarbonTableIdentifier
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatusManager}
@@ -59,7 +59,7 @@ object DataManagementFunc {
 
     val sc = sqlContext
     // Delete the records based on data
-    val table = CarbonMetadata.getInstance.getCarbonTable(databaseName + "_" + tableName)
+    val table = schema.getCarbonTable
     val loadMetadataDetailsArray =
       SegmentStatusManager.readLoadMetadata(table.getMetaDataFilepath).toList
     val resultMap = new CarbonDeleteLoadByDateRDD(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index f159c61..2922365 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -734,7 +734,7 @@ object CarbonDataRDDFactory {
 
       if (!updateModel.isDefined) {
       CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
-        carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName, currentLoadCount.toString)
+        currentLoadCount.toString, carbonTable)
       }
       var loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
       var errorMessage: String = "DataLoad failure"

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index bbdbe4f..b4720a9 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -764,7 +764,7 @@ object CarbonDataRDDFactory {
 
       if (!updateModel.isDefined) {
       CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
-        carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName, currentLoadCount.toString)
+        currentLoadCount.toString, carbonTable)
       }
       var loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
       var errorMessage: String = "DataLoad failure"

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 5dd6832..8818c6b 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -41,7 +41,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.dictionary.server.DictionaryServer
 import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
-import org.apache.carbondata.core.metadata.{CarbonMetadata, CarbonTableIdentifier}
+import org.apache.carbondata.core.metadata.CarbonTableIdentifier
 import org.apache.carbondata.core.metadata.encoder.Encoding
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension
@@ -89,7 +89,7 @@ case class AlterTableCompaction(alterTableModel: AlterTableModel) extends Runnab
     if (relation == null) {
       sys.error(s"Table $databaseName.$tableName does not exist")
     }
-    if (null == CarbonMetadata.getInstance.getCarbonTable(databaseName + "_" + tableName)) {
+    if (null == relation.tableMeta.carbonTable) {
       LOGGER.error(s"alter table failed. table not found: $databaseName.$tableName")
       sys.error(s"alter table failed. table not found: $databaseName.$tableName")
     }
@@ -352,7 +352,7 @@ case class LoadTable(
     if (relation == null) {
       sys.error(s"Table $dbName.$tableName does not exist")
     }
-    if (null == CarbonMetadata.getInstance.getCarbonTable(dbName + "_" + tableName)) {
+    if (null == relation.tableMeta.carbonTable) {
       LOGGER.error(s"Data loading failed. table not found: $dbName.$tableName")
       LOGGER.audit(s"Data loading failed. table not found: $dbName.$tableName")
       sys.error(s"Data loading failed. table not found: $dbName.$tableName")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef583afe/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala
index 1f5736e..954801a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala
@@ -40,7 +40,6 @@ import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.datastore.impl.FileFactory.FileType
 import org.apache.carbondata.core.fileoperations.FileWriteOperation
-import org.apache.carbondata.core.locks.ZookeeperInit
 import org.apache.carbondata.core.metadata.{CarbonMetadata, CarbonTableIdentifier}
 import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
 import org.apache.carbondata.core.metadata.datatype.DataType.DECIMAL
@@ -529,7 +528,6 @@ class CarbonMetastore(conf: RuntimeConfig, val storePath: String) {
         case Some(tableMeta) =>
           metadata.tablesMeta -= tableMeta
           CarbonMetadata.getInstance.removeTable(dbName + "_" + tableName)
-          CarbonMetadata.getInstance.removeTable(dbName + "_" + tableName)
           updateSchemasUpdatedTime(touchSchemaFileSystemTime(dbName, tableName))
         case None =>
           LOGGER.info(s"Metadata does not contain entry for table $tableName in database $dbName")


[03/42] carbondata git commit: [CARBONDATA-909] Added option to specify single pass load in data frame

Posted by ra...@apache.org.
[CARBONDATA-909] Added option to specify single pass load in data frame
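
The added tests below show the intended usage; as a standalone sketch (the DataFrame contents and table name are placeholders, and the single-pass behaviour noted in the comment describes CarbonData's existing single-pass load path, not something introduced by this patch):

import org.apache.spark.sql.{SaveMode, SparkSession}

object SinglePassWriteSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("single-pass-demo").getOrCreate()
    import spark.implicits._
    val df = (1 to 1000).map(i => (s"a$i", s"b$i", i)).toDF("c1", "c2", "c3")

    // With single_pass=true the load skips the separate dictionary-generation pass and
    // generates dictionary values while the data is being written.
    df.write
      .format("carbondata")
      .option("tableName", "carbon_single_pass_demo")
      .option("tempCSV", "false")
      .option("single_pass", "true")
      .mode(SaveMode.Overwrite)
      .save()
  }
}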


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/38a51449
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/38a51449
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/38a51449

Branch: refs/heads/branch-1.1
Commit: 38a51449ba9a35706bae2a78628a9654f0634960
Parents: 01048f8
Author: Sanoj MG <sa...@gmail.com>
Authored: Wed Apr 12 22:32:51 2017 +0400
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:44:00 2017 +0530

----------------------------------------------------------------------
 .../testsuite/dataload/TestLoadDataFrame.scala  | 32 ++++++++++++++++++++
 .../spark/CarbonDataFrameWriter.scala           |  3 +-
 2 files changed, 34 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/38a51449/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
index f50620f..9179c08 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
@@ -59,6 +59,8 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS carbon5")
     sql("DROP TABLE IF EXISTS carbon6")
     sql("DROP TABLE IF EXISTS carbon7")
+    sql("DROP TABLE IF EXISTS carbon8")
+    sql("DROP TABLE IF EXISTS carbon9")
   }
 
 
@@ -167,6 +169,36 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     )
   }
 
+  test("test load dataframe with single pass enabled") {
+    // save dataframe to carbon file
+    df.write
+      .format("carbondata")
+      .option("tableName", "carbon8")
+      .option("tempCSV", "false")
+      .option("single_pass", "true")
+      .option("compress", "false")
+      .mode(SaveMode.Overwrite)
+      .save()
+    checkAnswer(
+      sql("select count(*) from carbon8 where c3 > 500"), Row(500)
+    )
+  }
+
+  test("test load dataframe with single pass disabled") {
+    // save dataframe to carbon file
+    df.write
+      .format("carbondata")
+      .option("tableName", "carbon9")
+      .option("tempCSV", "true")
+      .option("single_pass", "false")
+      .option("compress", "false")
+      .mode(SaveMode.Overwrite)
+      .save()
+    checkAnswer(
+      sql("select count(*) from carbon9 where c3 > 500"), Row(500)
+    )
+  }
+
   override def afterAll {
     dropTable
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/38a51449/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
index 36e2440..0d1b1df 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
@@ -193,7 +193,8 @@ class CarbonDataFrameWriter(val dataFrame: DataFrame) {
     s"""
           LOAD DATA INPATH '$csvFolder'
           INTO TABLE ${options.dbName}.${options.tableName}
-          OPTIONS ('FILEHEADER' = '${dataFrame.columns.mkString(",")}')
+          OPTIONS ('FILEHEADER' = '${dataFrame.columns.mkString(",")}',
+          'SINGLE_PASS' = '${options.singlePass}')
       """
   }
 


[34/42] carbondata git commit: Inset Filter

Posted by ra...@apache.org.
Inset Filter
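
For context: Spark's optimizer rewrites an IN predicate with a long list of literals into an InSet expression (the threshold is a Spark setting, typically around ten values; that detail is outside this patch). Before this change such predicates fell through the pattern match and were not pushed down; with it they are translated to sources.In. A hypothetical query shape that now benefits:

import org.apache.spark.sql.SparkSession

object InSetPushdownSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("inset-demo").getOrCreate()
    // The long literal list becomes InSet in the optimized plan and, after this patch,
    // reaches CarbonData as an In filter instead of being evaluated only in Spark.
    spark.sql(
      """SELECT c1, c3 FROM default.carbon_demo
        |WHERE c1 IN ('a1','a2','a3','a4','a5','a6','a7','a8','a9','a10','a11','a12')
      """.stripMargin).show()
  }
}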


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/5b66732c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/5b66732c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/5b66732c

Branch: refs/heads/branch-1.1
Commit: 5b66732ccf0f54e09f61a3c6e07c26c69ae03e28
Parents: 42ad4ab
Author: sounakr <so...@gmail.com>
Authored: Fri May 26 16:59:44 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:26:19 2017 +0530

----------------------------------------------------------------------
 .../apache/spark/sql/execution/CarbonLateDecodeStrategy.scala    | 4 ++++
 .../scala/org/apache/spark/sql/optimizer/CarbonFilters.scala     | 4 ++++
 2 files changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/5b66732c/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
index ac43a12..4605914 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/CarbonLateDecodeStrategy.scala
@@ -471,6 +471,10 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
         Some(CastExpr(c))
       case c@In(Cast(a: Attribute, _), list) if !list.exists(!_.isInstanceOf[Literal]) =>
         Some(CastExpr(c))
+      case InSet(a: Attribute, set) =>
+        Some(sources.In(a.name, set.toArray))
+      case Not(InSet(a: Attribute, set)) =>
+        Some(sources.Not(sources.In(a.name, set.toArray)))
       case GreaterThan(a: Attribute, Literal(v, t)) =>
         Some(sources.GreaterThan(a.name, v))
       case GreaterThan(Literal(v, t), a: Attribute) =>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/5b66732c/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index f8abd67..89823fe 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -197,6 +197,10 @@ object CarbonFilters {
           Some(CastExpr(c))
         case c@In(Cast(a: Attribute, _), list) if !list.exists(!_.isInstanceOf[Literal]) =>
             Some(CastExpr(c))
+        case InSet(a: Attribute, set) =>
+          Some(sources.In(a.name, set.toArray))
+        case Not(InSet(a: Attribute, set)) =>
+          Some(sources.Not(sources.In(a.name, set.toArray)))
         case GreaterThan(a: Attribute, Literal(v, t)) =>
           Some(sources.GreaterThan(a.name, v))
         case GreaterThan(Literal(v, t), a: Attribute) =>


[26/42] carbondata git commit: Acquire semaphore before submit a producer in finish.

Posted by ra...@apache.org.
Acquire semaphore before submit a producer in finish.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2403f280
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2403f280
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2403f280

Branch: refs/heads/branch-1.1
Commit: 2403f2807d1f9d4257a34ecf322d7262ca3a6320
Parents: 64f973e
Author: Yadong Qi <qi...@gmail.com>
Authored: Thu Jun 1 20:28:19 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:16:56 2017 +0530

----------------------------------------------------------------------
 .../store/CarbonFactDataHandlerColumnar.java    | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2403f280/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
index f6ceb84..4ba1717 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
@@ -862,13 +862,19 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
   public void finish() throws CarbonDataWriterException {
     // still some data is present in stores if entryCount is more
     // than 0
-    producerExecutorServiceTaskList.add(producerExecutorService
-        .submit(new Producer(blockletDataHolder, dataRows, ++writerTaskSequenceCounter, true)));
-    blockletProcessingCount.incrementAndGet();
-    processedDataCount += entryCount;
-    closeWriterExecutionService(producerExecutorService);
-    processWriteTaskSubmitList(producerExecutorServiceTaskList);
-    processingComplete = true;
+    try {
+      semaphore.acquire();
+      producerExecutorServiceTaskList.add(producerExecutorService
+          .submit(new Producer(blockletDataHolder, dataRows, ++writerTaskSequenceCounter, true)));
+      blockletProcessingCount.incrementAndGet();
+      processedDataCount += entryCount;
+      closeWriterExecutionService(producerExecutorService);
+      processWriteTaskSubmitList(producerExecutorServiceTaskList);
+      processingComplete = true;
+    } catch (InterruptedException e) {
+      LOGGER.error(e, e.getMessage());
+      throw new CarbonDataWriterException(e.getMessage(), e);
+    }
   }
 
   /**
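
A minimal sketch of the pattern this fix follows (stand-in names, not CarbonData code): producer submissions are bounded by a semaphore that each task releases on completion, so finish() must also take a permit before submitting the final Producer for the leftover rows; otherwise it could be scheduled while the permitted number of producers is already running.

    import java.util.concurrent.{Executors, Semaphore}

    val permits   = new Semaphore(2)            // maximum producers in flight
    val producers = Executors.newFixedThreadPool(2)

    def submitProducer(work: () => Unit): Unit = {
      permits.acquire()                         // may block until a slot frees up
      producers.submit(new Runnable {
        override def run(): Unit =
          try work() finally permits.release()  // give the slot back when done
      })
    }

    // finish(): flush the last, partially filled blocklet through the same gate
    def finish(flushLastPage: () => Unit): Unit = submitProducer(flushLastPage)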


[40/42] carbondata git commit: [CARBONDATA - 1159] Batch sort loading is not proper without synchronization

Posted by ra...@apache.org.
[CARBONDATA - 1159] Batch sort loading is not proper without synchronization


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/72bbb62b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/72bbb62b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/72bbb62b

Branch: refs/heads/branch-1.1
Commit: 72bbb62bc0bb8e7a38be09938c3cfae171af2ea2
Parents: da952e8
Author: dhatchayani <dh...@gmail.com>
Authored: Mon Jun 12 21:56:47 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:36:48 2017 +0530

----------------------------------------------------------------------
 .../UnsafeBatchParallelReadMergeSorterImpl.java |  7 ++-
 .../newflow/sort/unsafe/UnsafeSortDataRows.java | 49 +++++++++++---------
 .../util/CarbonDataProcessorUtil.java           |  7 ++-
 3 files changed, 37 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/72bbb62b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
index 0c6fa27..20fd48b 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
@@ -147,9 +147,9 @@ public class UnsafeBatchParallelReadMergeSorterImpl extends AbstractMergeSorter
             }
           }
           if (i > 0) {
-            sortDataRows.getSortDataRow().addRowBatch(buffer, i);
-            rowCounter.getAndAdd(i);
             synchronized (sortDataRows) {
+              sortDataRows.getSortDataRow().addRowBatchWithOutSync(buffer, i);
+              rowCounter.getAndAdd(i);
               if (!sortDataRows.getSortDataRow().canAdd()) {
                 sortDataRows.finish();
                 sortDataRows.createSortDataRows();
@@ -197,6 +197,9 @@ public class UnsafeBatchParallelReadMergeSorterImpl extends AbstractMergeSorter
 
     private void createSortDataRows() {
       int inMemoryChunkSizeInMB = CarbonProperties.getInstance().getSortMemoryChunkSizeInMB();
+      if (inMemoryChunkSizeInMB > sortParameters.getBatchSortSizeinMb()) {
+        inMemoryChunkSizeInMB = sortParameters.getBatchSortSizeinMb();
+      }
       this.finalMerger = new UnsafeSingleThreadFinalSortFilesMerger(sortParameters,
           sortParameters.getTempFileLocation());
       unsafeIntermediateFileMerger = new UnsafeIntermediateMerger(sortParameters);
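
A rough sketch of the locking change above (stand-in types, not CarbonData code): the batch insert, the row-counter update and the "is the page full?" check now sit under one monitor, and the unsynchronized addRowBatchWithOutSync variant is used inside that block so the rows are not locked twice; this keeps the decision to finish and recreate the sort data rows atomic with the insert that filled them.

    class SortPage(capacity: Int) {
      private var rows = 0
      def addRowBatchWithOutSync(n: Int): Unit = rows += n   // caller already holds the lock
      def canAdd: Boolean = rows < capacity
    }

    val lock = new Object
    var page = new SortPage(100000)
    var rowCounter = 0L

    def addBatch(n: Int): Unit = lock.synchronized {
      page.addRowBatchWithOutSync(n)
      rowCounter += n
      if (!page.canAdd) {              // finish the full page and start a fresh one
        // flush(page) would happen here
        page = new SortPage(100000)
      }
    }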

http://git-wip-us.apache.org/repos/asf/carbondata/blob/72bbb62b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
index 898b73d..b4daa51 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
@@ -132,7 +132,7 @@ public class UnsafeSortDataRows {
   public static MemoryBlock getMemoryBlock(long size) throws CarbonSortKeyAndGroupByException {
     MemoryBlock baseBlock = null;
     int tries = 0;
-    while (true && tries < 100) {
+    while (tries < 100) {
       baseBlock = UnsafeMemoryManager.INSTANCE.allocateMemory(size);
       if (baseBlock == null) {
         try {
@@ -165,29 +165,32 @@ public class UnsafeSortDataRows {
     // if record holder list size is equal to sort buffer size then it will
     // sort the list and then write current list data to file
     synchronized (addRowsLock) {
-      for (int i = 0; i < size; i++) {
-        if (rowPage.canAdd()) {
-          bytesAdded += rowPage.addRow(rowBatch[i]);
-        } else {
-          try {
-            if (enableInMemoryIntermediateMerge) {
-              unsafeInMemoryIntermediateFileMerger.startInmemoryMergingIfPossible();
-            }
-            unsafeInMemoryIntermediateFileMerger.startFileMergingIfPossible();
-            semaphore.acquire();
-            dataSorterAndWriterExecutorService.submit(new DataSorterAndWriter(rowPage));
-            MemoryBlock memoryBlock = getMemoryBlock(inMemoryChunkSize);
-            boolean saveToDisk = !UnsafeMemoryManager.INSTANCE.isMemoryAvailable();
-            rowPage = new UnsafeCarbonRowPage(parameters.getNoDictionaryDimnesionColumn(),
-                parameters.getDimColCount() + parameters.getComplexDimColCount(),
-                parameters.getMeasureColCount(), parameters.getAggType(), memoryBlock, saveToDisk);
-            bytesAdded += rowPage.addRow(rowBatch[i]);
-          } catch (Exception e) {
-            LOGGER.error(
-                "exception occurred while trying to acquire a semaphore lock: " + e.getMessage());
-            throw new CarbonSortKeyAndGroupByException(e);
-          }
+      addBatch(rowBatch, size);
+    }
+  }
 
+  private void addBatch(Object[][] rowBatch, int size) throws CarbonSortKeyAndGroupByException {
+    for (int i = 0; i < size; i++) {
+      if (rowPage.canAdd()) {
+        bytesAdded += rowPage.addRow(rowBatch[i]);
+      } else {
+        try {
+          if (enableInMemoryIntermediateMerge) {
+            unsafeInMemoryIntermediateFileMerger.startInmemoryMergingIfPossible();
+          }
+          unsafeInMemoryIntermediateFileMerger.startFileMergingIfPossible();
+          semaphore.acquire();
+          dataSorterAndWriterExecutorService.submit(new DataSorterAndWriter(rowPage));
+          MemoryBlock memoryBlock = getMemoryBlock(inMemoryChunkSize);
+          boolean saveToDisk = !UnsafeMemoryManager.INSTANCE.isMemoryAvailable();
+          rowPage = new UnsafeCarbonRowPage(parameters.getNoDictionaryDimnesionColumn(),
+              parameters.getDimColCount() + parameters.getComplexDimColCount(),
+              parameters.getMeasureColCount(), parameters.getAggType(), memoryBlock, saveToDisk);
+          bytesAdded += rowPage.addRow(rowBatch[i]);
+        } catch (Exception e) {
+          LOGGER.error(
+              "exception occurred while trying to acquire a semaphore lock: " + e.getMessage());
+          throw new CarbonSortKeyAndGroupByException(e);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/72bbb62b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
index a4de24e..12a453d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonDataProcessorUtil.java
@@ -542,9 +542,11 @@ public final class CarbonDataProcessorUtil {
             configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE)
                 .toString());
       }
+      LOGGER.warn("sort scope is set to " + sortScope);
     } catch (Exception e) {
       sortScope = SortScopeOptions.getSortScope(CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
-      LOGGER.warn("sort scope is set to " + sortScope);
+      LOGGER.warn("Exception occured while resolving sort scope. " +
+          "sort scope is set to " + sortScope);
     }
     return sortScope;
   }
@@ -567,8 +569,11 @@ public final class CarbonDataProcessorUtil {
             configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB)
                 .toString());
       }
+      LOGGER.warn("batch sort size is set to " + batchSortSizeInMb);
     } catch (Exception e) {
       batchSortSizeInMb = 0;
+      LOGGER.warn("Exception occured while resolving batch sort size. " +
+          "batch sort size is set to " + batchSortSizeInMb);
     }
     return batchSortSizeInMb;
   }
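
For orientation, a hedged sketch: the two values resolved and logged above typically originate from the data load itself. The option names below are assumptions inferred from LOAD_SORT_SCOPE and LOAD_BATCH_SORT_SIZE_INMB and should be verified against CarbonCommonConstants; depending on the deployment they may instead be set in carbon.properties.

    // hypothetical load with batch sort enabled; option names are assumptions
    carbon.sql(
      """LOAD DATA INPATH 'sample.csv file path' INTO TABLE test_table
        |OPTIONS('SORT_SCOPE'='BATCH_SORT', 'BATCH_SORT_SIZE_INMB'='32')""".stripMargin)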


[15/42] carbondata git commit: Docs/format md files for pdf (#1)

Posted by ra...@apache.org.
Docs/format md files for pdf (#1)

* Modified MDs for PdfGeneration


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0c6f5f34
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0c6f5f34
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0c6f5f34

Branch: refs/heads/branch-1.1
Commit: 0c6f5f34c3724d40aa7aac08ee63a7193167782b
Parents: a8b6726
Author: Jatin Demla <ja...@gmail.com>
Authored: Wed May 24 00:46:22 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:23 2017 +0530

----------------------------------------------------------------------
 docs/configuration-parameters.md     |  8 ++--
 docs/data-management.md              |  9 ----
 docs/ddl-operation-on-carbondata.md  | 35 ++++++++------
 docs/dml-operation-on-carbondata.md  |  2 +-
 docs/faq.md                          | 20 ++++++--
 docs/file-structure-of-carbondata.md |  7 +--
 docs/installation-guide.md           | 78 ++++++++++++++++---------------
 docs/quick-start-guide.md            | 39 ++++++++++++----
 docs/troubleshooting.md              |  9 ++--
 docs/useful-tips-on-carbondata.md    |  2 +-
 10 files changed, 121 insertions(+), 88 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index e4f8f33..c63f73d 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -114,7 +114,7 @@ This section provides the details of all the configurations required for CarbonD
 
 | Parameter | Default Value | Description |
 |-----------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| carbon.numberof.preserve.segments | 0 | If the user wants to preserve some number of segments from being compacted then he can set this property. Example: carbon.numberof.preserve.segments=2 then 2 latest segments will always be excluded from the compaction. No segments will be preserved by default. |
+| carbon.numberof.preserve.segments | 0 | If the user wants to preserve some number of segments from being compacted then he can set this property. Example: carbon.numberof.preserve.segments = 2 then 2 latest segments will always be excluded from the compaction. No segments will be preserved by default. |
 | carbon.allowed.compaction.days | 0 | Compaction will merge the segments which are loaded with in the specific number of days configured. Example: If the configuration is 2, then the segments which are loaded in the time frame of 2 days only will get merged. Segments which are loaded 2 days apart will not be merged. This is disabled by default. |
 | carbon.enable.auto.load.merge | false | To enable compaction while data loading. |
 
@@ -130,9 +130,9 @@ This section provides the details of all the configurations required for CarbonD
   
 | Parameter | Default Value | Description |
 |---------------------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| high.cardinality.identify.enable | true | If the parameter is true, the high cardinality columns of the dictionary code are automatically recognized and these columns will not be used as global dictionary encoding. If the parameter is false, all dictionary encoding columns are used as dictionary encoding. The high cardinality column must meet the following requirements: value of cardinality > configured value of high.cardinalityEqually, the value of cardinality is higher than the threshold.value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentageEqually, the ratio of the cardinality value to data row number is higher than the configured percentage. |
-| high.cardinality.threshold | 1000000 | It is a threshold to identify high cardinality of the columns.If the value of columns' cardinality > the configured value, then the columns are excluded from dictionary encoding. |
-| high.cardinality.row.count.percentage | 80 | Percentage to identify whether column cardinality is more than configured percent of total row count.Configuration value formula:Value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentageThe value of the parameter must be larger than 0. |
+| high.cardinality.identify.enable | true | If the parameter is true, the high cardinality columns of the dictionary code are automatically recognized and these columns will not be used as global dictionary encoding. If the parameter is false, all dictionary encoding columns are used as dictionary encoding. The high cardinality column must meet the following requirements: value of cardinality > configured value of high.cardinality. Equally, the value of cardinality is higher than the threshold.value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentage. Equally, the ratio of the cardinality value to data row number is higher than the configured percentage. |
+| high.cardinality.threshold | 1000000  | It is a threshold to identify high cardinality of the columns.If the value of columns' cardinality > the configured value, then the columns are excluded from dictionary encoding. |
+| high.cardinality.row.count.percentage | 80 | Percentage to identify whether column cardinality is more than configured percent of total row count.Configuration value formula:Value of cardinality/ row number x 100 > configured value of high.cardinality.row.count.percentage. The value of the parameter must be larger than 0. |
 | carbon.cutOffTimestamp | 1970-01-01 05:30:00 | Sets the start date for calculating the timestamp. Java counts the number of milliseconds from start of "1970-01-01 00:00:00". This property is used to customize the start of position. For example "2000-01-01 00:00:00". The date must be in the form "carbon.timestamp.format". NOTE: The CarbonData supports data store up to 68 years from the cut-off time defined. For example, if the cut-off time is 1970-01-01 05:30:00, then the data can be stored up to 2038-01-01 05:30:00. |
 | carbon.timegranularity | SECOND | The property used to set the data granularity level DAY, HOUR, MINUTE, or SECOND. |
   

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/data-management.md
----------------------------------------------------------------------
diff --git a/docs/data-management.md b/docs/data-management.md
index 42411de..81866a1 100644
--- a/docs/data-management.md
+++ b/docs/data-management.md
@@ -155,12 +155,3 @@ CLEAN FILES FOR TABLE table1
     To update we need to specify the column expression with an optional filter condition(s).
 
     For update commands refer to [DML operations on CarbonData](dml-operation-on-carbondata.md).
-
-
-    
-
-
-
-
- 
- 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/ddl-operation-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/ddl-operation-on-carbondata.md b/docs/ddl-operation-on-carbondata.md
index 6222714..66c9d30 100644
--- a/docs/ddl-operation-on-carbondata.md
+++ b/docs/ddl-operation-on-carbondata.md
@@ -20,7 +20,7 @@
 # DDL Operations on CarbonData
 This tutorial guides you through the data definition language support provided by CarbonData.
 
-## Overview 
+## Overview
 The following DDL operations are supported in CarbonData :
 
 * [CREATE TABLE](#create-table)
@@ -37,6 +37,7 @@ The following DDL operations are supported in CarbonData :
 
 ## CREATE TABLE
   This command can be used to create a CarbonData table by specifying the list of fields along with the table properties.
+
 ```
    CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
                     [(col_name data_type , ...)]
@@ -49,9 +50,9 @@ The following DDL operations are supported in CarbonData :
 
 | Parameter | Description | Optional |
 |---------------|-----------------------------------------------------------------------------------------------------------------------------------------------|----------|
-| db_name | Name of the database. Database name should consist of alphanumeric characters and underscore(_) special character. | Yes |
-| field_list | Comma separated List of fields with data type. The field names should consist of alphanumeric characters and underscore(_) special character. | No |
-| table_name | The name of the table in Database. Table Name should consist of alphanumeric characters and underscore(_) special character. | No |
+| db_name | Name of the database. Database name should consist of alphanumeric characters and underscore(\_) special character. | Yes |
+| field_list | Comma separated List of fields with data type. The field names should consist of alphanumeric characters and underscore(\_) special character. | No |
+| table_name | The name of the table in Database. Table Name should consist of alphanumeric characters and underscore(\_) special character. | No |
 | STORED BY | "org.apache.carbondata.format", identifies and creates a CarbonData table. | No |
 | TBLPROPERTIES | List of CarbonData table properties. |  |
 
@@ -62,6 +63,7 @@ The following DDL operations are supported in CarbonData :
    - **Dictionary Encoding Configuration**
 
        Dictionary encoding is enabled by default for all String columns, and disabled for non-String columns. You can include and exclude columns for dictionary encoding.
+
 ```
        TBLPROPERTIES ('DICTIONARY_EXCLUDE'='column1, column2')
        TBLPROPERTIES ('DICTIONARY_INCLUDE'='column1, column2')
@@ -72,15 +74,17 @@ The following DDL operations are supported in CarbonData :
    - **Row/Column Format Configuration**
 
        Column groups with more than one column are stored in row format, instead of columnar format. By default, each column is a separate column group.
+
 ```
-TBLPROPERTIES ('COLUMN_GROUPS'='(column1, column2),
-(Column3,Column4,Column5)')
+       TBLPROPERTIES ('COLUMN_GROUPS'='(column1, column2),
+       (Column3,Column4,Column5)')
 ```
 
    - **Table Block Size Configuration**
 
      The block size of table files can be defined using the property TABLE_BLOCKSIZE. It accepts only integer values. The default value is 1024 MB and supports a range of 1 MB to 2048 MB.
      If you do not specify this value in the DDL command, default value is used.
+
 ```
        TBLPROPERTIES ('TABLE_BLOCKSIZE'='512')
 ```
@@ -91,6 +95,7 @@ TBLPROPERTIES ('COLUMN_GROUPS'='(column1, column2),
 
       Inverted index is very useful to improve compression ratio and query speed, especially for those low-cardinality columns which are in reward position.
       By default inverted index is enabled. The user can disable the inverted index creation for some columns.
+
 ```
        TBLPROPERTIES ('NO_INVERTED_INDEX'='column1, column3')
 ```
@@ -188,7 +193,7 @@ This command is used to add a new column to the existing table.
 |--------------------|-----------------------------------------------------------------------------------------------------------|
 | db_Name            | Name of the database. If this parameter is left unspecified, the current database is selected.            |
 | table_name         | Name of the existing table.                                                                               |
-| col_name data_type | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (_). |
+| col_name data_type | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (\_). |
 
 NOTE: Do not name the column after name, tupleId, PositionId, and PositionReference when creating Carbon tables because they are used internally by UPDATE, DELETE, and secondary index.
 
@@ -207,15 +212,18 @@ NOTE: Do not name the column after name, tupleId, PositionId, and PositionRefere
 ```
 
 ```
-    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING) TBLPROPERTIES('DICTIONARY_EXCLUDE'='b1');
+    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING)
+    TBLPROPERTIES('DICTIONARY_EXCLUDE'='b1');
 ```
 
 ```
-    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING) TBLPROPERTIES('DICTIONARY_INCLUDE'='a1');
+    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING)
+    TBLPROPERTIES('DICTIONARY_INCLUDE'='a1');
 ```
 
 ```
-    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING) TBLPROPERTIES('DEFAULT.VALUE.a1'='10');
+    ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING)
+    TBLPROPERTIES('DEFAULT.VALUE.a1'='10');
 ```
 
 
@@ -232,7 +240,7 @@ This command is used to delete a existing column or multiple columns in a table.
 |------------|----------------------------------------------------------------------------------------------------------|
 | db_Name    | Name of the database. If this parameter is left unspecified, the current database is selected.           |
 | table_name | Name of the existing table.                                                                              |
-| col_name   | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (_) |
+| col_name   | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (\_) |
 
 #### Usage Guidelines
 
@@ -270,7 +278,8 @@ If the table contains 4 columns namely a1, b1, c1, and d1.
 This command is used to change the data type from INT to BIGINT or decimal precision from lower to higher.
 
 ```
-    ALTER TABLE [db_name.]table_name CHANGE col_name col_name changed_column_type;
+    ALTER TABLE [db_name.]table_name
+    CHANGE col_name col_name changed_column_type;
 ```
 
 #### Parameter Description
@@ -278,7 +287,7 @@ This command is used to change the data type from INT to BIGINT or decimal preci
 |---------------------|-----------------------------------------------------------------------------------------------------------|
 | db_Name             | Name of the database. If this parameter is left unspecified, the current database is selected.            |
 | table_name          | Name of the existing table.                                                                               |
-| col_name            | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (_). |
+| col_name            | Name of comma-separated column with data type. Column names contain letters, digits, and underscores (\_). |
 | changed_column_type | The change in the data type.                                                                              |
 
 #### Usage Guidelines

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/dml-operation-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/dml-operation-on-carbondata.md b/docs/dml-operation-on-carbondata.md
index 579b9cb..f9d9f45 100644
--- a/docs/dml-operation-on-carbondata.md
+++ b/docs/dml-operation-on-carbondata.md
@@ -107,7 +107,7 @@ You can use the following options to load data:
 - **COMPLEX_DELIMITER_LEVEL_2:** Split the complex type nested data column in a row. Applies level_1 delimiter & applies level_2 based on complex data type (eg., a:b$c:d --> Array> = {{a,b},{c,d}}).
 
     ```
-    OPTIONS('COMPLEX_DELIMITER_LEVEL_2'=':') 
+    OPTIONS('COMPLEX_DELIMITER_LEVEL_2'=':')
     ```
 
 - **ALL_DICTIONARY_PATH:** All dictionary files path.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/faq.md
----------------------------------------------------------------------
diff --git a/docs/faq.md b/docs/faq.md
index cae4f97..88db7d5 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -58,12 +58,16 @@ To ignore the Bad Records from getting stored in the raw csv, we need to set the
 The store location specified while creating carbon session is used by the CarbonData to store the meta data like the schema, dictionary files, dictionary meta data and sort indexes.
 
 Try creating ``carbonsession`` with ``storepath`` specified in the following manner :
+
 ```
-val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession(<store_path>)
+val carbon = SparkSession.builder().config(sc.getConf)
+             .getOrCreateCarbonSession(<store_path>)
 ```
 Example:
+
 ```
-val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store ")
+val carbon = SparkSession.builder().config(sc.getConf)
+             .getOrCreateCarbonSession("hdfs://localhost:9000/carbon/store")
 ```
 
 ## What is Carbon Lock Type?
@@ -77,7 +81,8 @@ In order to build CarbonData project it is necessary to specify the spark profil
 
 ## How Carbon will behave when execute insert operation in abnormal scenarios?
 Carbon support insert operation, you can refer to the syntax mentioned in [DML Operations on CarbonData](http://carbondata.apache.org/dml-operation-on-carbondata).
-First, create a soucre table in spark-sql and load data into this created table. 
+First, create a soucre table in spark-sql and load data into this created table.
+
 ```
 CREATE TABLE source_table(
 id String,
@@ -85,6 +90,7 @@ name String,
 city String)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY ",";
 ```
+
 ```
 SELECT * FROM source_table;
 id  name    city
@@ -92,9 +98,11 @@ id  name    city
 2   erlu    hangzhou
 3   davi    shenzhen
 ```
+
 **Scenario 1** :
 
 Suppose, the column order in carbon table is different from source table, use script "SELECT * FROM carbon table" to query, will get the column order similar as source table, rather than in carbon table's column order as expected. 
+
 ```
 CREATE TABLE IF NOT EXISTS carbon_table(
 id String,
@@ -102,9 +110,11 @@ city String,
 name String)
 STORED BY 'carbondata';
 ```
+
 ```
 INSERT INTO TABLE carbon_table SELECT * FROM source_table;
 ```
+
 ```
 SELECT * FROM carbon_table;
 id  city    name
@@ -112,9 +122,11 @@ id  city    name
 2   erlu    hangzhou
 3   davi    shenzhen
 ```
+
 As result shows, the second column is city in carbon table, but what inside is name, such as jack. This phenomenon is same with insert data into hive table.
 
 If you want to insert data into corresponding column in carbon table, you have to specify the column order same in insert statment. 
+
 ```
 INSERT INTO TABLE carbon_table SELECT id, city, name FROM source_table;
 ```
@@ -122,9 +134,11 @@ INSERT INTO TABLE carbon_table SELECT id, city, name FROM source_table;
 **Scenario 2** :
 
 Insert operation will be failed when the number of column in carbon table is different from the column specified in select statement. The following insert operation will be failed.
+
 ```
 INSERT INTO TABLE carbon_table SELECT id, city FROM source_table;
 ```
+
 **Scenario 3** :
 
 When the column type in carbon table is different from the column specified in select statement. The insert operation will still success, but you may get NULL in result, because NULL will be substitute value when conversion type failed.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/file-structure-of-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/file-structure-of-carbondata.md b/docs/file-structure-of-carbondata.md
index e6be48d..7ac234c 100644
--- a/docs/file-structure-of-carbondata.md
+++ b/docs/file-structure-of-carbondata.md
@@ -24,7 +24,7 @@ CarbonData files contain groups of data called blocklets, along with all require
 The file footer can be read once to build the indices in memory, which can be utilized for optimizing the scans and processing for all subsequent queries.
 
 ### Understanding CarbonData File Structure
-* Block : It would be as same as HDFS block, CarbonData creates one file for each data block, user can specify TABLE_BLOCKSIZE during creation table. Each file contains File Header, Blocklets and File Footer. 
+* Block : It would be as same as HDFS block, CarbonData creates one file for each data block, user can specify TABLE_BLOCKSIZE during creation table. Each file contains File Header, Blocklets and File Footer.
 
 ![CarbonData File Structure](../docs/images/carbon_data_file_structure_new.png?raw=true)
 
@@ -32,7 +32,7 @@ The file footer can be read once to build the indices in memory, which can be ut
 * File Footer : it contains Number of rows, segmentinfo ,all blocklets’ info and index, you can find the detail from the below diagram.
 * Blocklet : Rows are grouped to form a blocklet, the size of the blocklet is configurable and default size is 64MB, Blocklet contains Column Page groups for each column.
 * Column Page Group : Data of one column and it is further divided into pages, it is guaranteed to be contiguous in file.
-* Page : It has the data of one column and the number of row is fixed to 32000 size. 
+* Page : It has the data of one column and the number of row is fixed to 32000 size.
 
 ![CarbonData File Format](../docs/images/carbon_data_format_new.png?raw=true)
 
@@ -40,6 +40,3 @@ The file footer can be read once to build the indices in memory, which can be ut
 * Data Page: Contains the encoded data of a column of columns.
 * Row ID Page (optional): Contains the row ID mappings used when the data page is stored as an inverted index.
 * RLE Page (optional): Contains additional metadata used when the data page is RLE coded.
-
-
-

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/installation-guide.md
----------------------------------------------------------------------
diff --git a/docs/installation-guide.md b/docs/installation-guide.md
index f4ca656..d9f27dd 100644
--- a/docs/installation-guide.md
+++ b/docs/installation-guide.md
@@ -54,24 +54,24 @@ followed by :
     
 6. In Spark node[master], configure the properties mentioned in the following table in `$SPARK_HOME/conf/spark-defaults.conf` file.
 
-   | Property | Value | Description |
-   |---------------------------------|-----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
-   | spark.driver.extraJavaOptions | `-Dcarbon.properties.filepath=$SPARK_HOME/conf/carbon.properties` | A string of extra JVM options to pass to the driver. For instance, GC settings or other logging. |
-   | spark.executor.extraJavaOptions | `-Dcarbon.properties.filepath=$SPARK_HOME/conf/carbon.properties` | A string of extra JVM options to pass to executors. For instance, GC settings or other logging. **NOTE**: You can enter multiple values separated by space. |
+| Property | Value | Description |
+|---------------------------------|-----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
+| spark.driver.extraJavaOptions | `-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties` | A string of extra JVM options to pass to the driver. For instance, GC settings or other logging. |
+| spark.executor.extraJavaOptions | `-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties` | A string of extra JVM options to pass to executors. For instance, GC settings or other logging. **NOTE**: You can enter multiple values separated by space. |
 
 7. Add the following properties in `$SPARK_HOME/conf/carbon.properties` file:
 
-   | Property             | Required | Description                                                                            | Example                             | Remark  |
-   |----------------------|----------|----------------------------------------------------------------------------------------|-------------------------------------|---------|
-   | carbon.storelocation | NO       | Location where data CarbonData will create the store and write the data in its own format. | hdfs://HOSTNAME:PORT/Opt/CarbonStore      | Propose to set HDFS directory |
+| Property             | Required | Description                                                                            | Example                             | Remark  |
+|----------------------|----------|----------------------------------------------------------------------------------------|-------------------------------------|---------|
+| carbon.storelocation | NO       | Location where data CarbonData will create the store and write the data in its own format. | hdfs://HOSTNAME:PORT/Opt/CarbonStore      | Propose to set HDFS directory |
 
 
 8. Verify the installation. For example:
 
-   ```
-   ./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
-   --executor-memory 2G
-   ```
+```
+./spark-shell --master spark://HOSTNAME:PORT --total-executor-cores 2
+--executor-memory 2G
+```
 
 **NOTE**: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.
 
@@ -98,37 +98,37 @@ To get started with CarbonData : [Quick Start](quick-start-guide.md), [DDL Opera
 
 3. Create `tar,gz` file of carbonlib folder and move it inside the carbonlib folder.
 
-    ```
-	cd $SPARK_HOME
-	tar -zcvf carbondata.tar.gz carbonlib/
-	mv carbondata.tar.gz carbonlib/
-    ```
+```
+cd $SPARK_HOME
+tar -zcvf carbondata.tar.gz carbonlib/
+mv carbondata.tar.gz carbonlib/
+```
 
 4. Configure the properties mentioned in the following table in `$SPARK_HOME/conf/spark-defaults.conf` file.
 
-   | Property | Description | Value |
-   |---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
-   | spark.master | Set this value to run the Spark in yarn cluster mode. | Set yarn-client to run the Spark in yarn cluster mode. |
-   | spark.yarn.dist.files | Comma-separated list of files to be placed in the working directory of each executor. |`$SPARK_HOME/conf/carbon.properties` |
-   | spark.yarn.dist.archives | Comma-separated list of archives to be extracted into the working directory of each executor. |`$SPARK_HOME/carbonlib/carbondata.tar.gz` |
-   | spark.executor.extraJavaOptions | A string of extra JVM options to pass to executors. For instance  **NOTE**: You can enter multiple values separated by space. |`-Dcarbon.properties.filepath=carbon.properties` |
-   | spark.executor.extraClassPath | Extra classpath entries to prepend to the classpath of executors. **NOTE**: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the values in below parameter spark.driver.extraClassPath |`carbondata.tar.gz/carbonlib/*` |
-   | spark.driver.extraClassPath | Extra classpath entries to prepend to the classpath of the driver. **NOTE**: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the value in below parameter spark.driver.extraClassPath. |`$SPARK_HOME/carbonlib/*` |
-   | spark.driver.extraJavaOptions | A string of extra JVM options to pass to the driver. For instance, GC settings or other logging. |`-Dcarbon.properties.filepath=$SPARK_HOME/conf/carbon.properties` |
+| Property | Description | Value |
+|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
+| spark.master | Set this value to run the Spark in yarn cluster mode. | Set yarn-client to run the Spark in yarn cluster mode. |
+| spark.yarn.dist.files | Comma-separated list of files to be placed in the working directory of each executor. |`$SPARK_HOME/conf/carbon.properties` |
+| spark.yarn.dist.archives | Comma-separated list of archives to be extracted into the working directory of each executor. |`$SPARK_HOME/carbonlib/carbondata.tar.gz` |
+| spark.executor.extraJavaOptions | A string of extra JVM options to pass to executors. For instance  **NOTE**: You can enter multiple values separated by space. |`-Dcarbon.properties.filepath = carbon.properties` |
+| spark.executor.extraClassPath | Extra classpath entries to prepend to the classpath of executors. **NOTE**: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the values in below parameter spark.driver.extraClassPath |`carbondata.tar.gz/carbonlib/*` |
+| spark.driver.extraClassPath | Extra classpath entries to prepend to the classpath of the driver. **NOTE**: If SPARK_CLASSPATH is defined in spark-env.sh, then comment it and append the value in below parameter spark.driver.extraClassPath. |`$SPARK_HOME/carbonlib/*` |
+| spark.driver.extraJavaOptions | A string of extra JVM options to pass to the driver. For instance, GC settings or other logging. |`-Dcarbon.properties.filepath = $SPARK_HOME/conf/carbon.properties` |
 
 
 5. Add the following properties in `$SPARK_HOME/conf/carbon.properties`:
 
-   | Property | Required | Description | Example | Default Value |
-   |----------------------|----------|----------------------------------------------------------------------------------------|-------------------------------------|---------------|
-   | carbon.storelocation | NO | Location where CarbonData will create the store and write the data in its own format. | hdfs://HOSTNAME:PORT/Opt/CarbonStore | Propose to set HDFS directory|
+| Property | Required | Description | Example | Default Value |
+|----------------------|----------|----------------------------------------------------------------------------------------|-------------------------------------|---------------|
+| carbon.storelocation | NO | Location where CarbonData will create the store and write the data in its own format. | hdfs://HOSTNAME:PORT/Opt/CarbonStore | Propose to set HDFS directory|
 
 6. Verify the installation.
 
-   ```
-     ./bin/spark-shell --master yarn-client --driver-memory 1g
-     --executor-cores 2 --executor-memory 2G
-   ```
+```
+ ./bin/spark-shell --master yarn-client --driver-memory 1g
+ --executor-cores 2 --executor-memory 2G
+```
   **NOTE**: Make sure you have permissions for CarbonData JARs and files through which driver and executor will start.
 
   Getting started with CarbonData : [Quick Start](quick-start-guide.md), [DDL Operations on CarbonData](ddl-operation-on-carbondata.md)
@@ -141,11 +141,12 @@ To get started with CarbonData : [Quick Start](quick-start-guide.md), [DDL Opera
 
    b. Run the following command to start the CarbonData thrift server.
 
-   ```
-   ./bin/spark-submit --conf spark.sql.hive.thriftServer.singleSession=true
-   --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
-   $SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR <carbon_store_path>
-   ```
+```
+./bin/spark-submit
+--conf spark.sql.hive.thriftServer.singleSession=true
+--class org.apache.carbondata.spark.thriftserver.CarbonThriftServer
+$SPARK_HOME/carbonlib/$CARBON_ASSEMBLY_JAR <carbon_store_path>
+```
 
 | Parameter | Description | Example |
 |---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------|
@@ -157,7 +158,8 @@ To get started with CarbonData : [Quick Start](quick-start-guide.md), [DDL Opera
    * Start with default memory and executors.
 
 ```
-./bin/spark-submit --conf spark.sql.hive.thriftServer.singleSession=true 
+./bin/spark-submit
+--conf spark.sql.hive.thriftServer.singleSession=true
 --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer 
 $SPARK_HOME/carbonlib
 /carbondata_2.10-0.1.0-incubating-SNAPSHOT-shade-hadoop2.7.2.jar 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/quick-start-guide.md
----------------------------------------------------------------------
diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md
index c7ad73b..1c490ac 100644
--- a/docs/quick-start-guide.md
+++ b/docs/quick-start-guide.md
@@ -61,22 +61,31 @@ import org.apache.spark.sql.CarbonSession._
 * Create a CarbonSession :
 
 ```
-val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("<hdfs store path>")
+val carbon = SparkSession.builder().config(sc.getConf)
+             .getOrCreateCarbonSession("<hdfs store path>")
 ```
-**NOTE**: By default metastore location is pointed to `../carbon.metastore`, user can provide own metastore location to CarbonSession like `SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("<hdfs store path>", "<local metastore path>")`
+**NOTE**: By default metastore location is pointed to `../carbon.metastore`, user can provide own metastore location to CarbonSession like `SparkSession.builder().config(sc.getConf)
+.getOrCreateCarbonSession("<hdfs store path>", "<local metastore path>")`
 
 #### Executing Queries
 
 ###### Creating a Table
 
 ```
-scala>carbon.sql("CREATE TABLE IF NOT EXISTS test_table(id string, name string, city string, age Int) STORED BY 'carbondata'")
+scala>carbon.sql("CREATE TABLE
+                        IF NOT EXISTS test_table(
+                                  id string,
+                                  name string,
+                                  city string,
+                                  age Int)
+                       STORED BY 'carbondata'")
 ```
 
 ###### Loading Data to a Table
 
 ```
-scala>carbon.sql("LOAD DATA INPATH 'sample.csv file path' INTO TABLE test_table")
+scala>carbon.sql("LOAD DATA INPATH 'sample.csv file path'
+                  INTO TABLE test_table")
 ```
 **NOTE**: Please provide the real file path of `sample.csv` for the above script.
 
@@ -85,7 +94,9 @@ scala>carbon.sql("LOAD DATA INPATH 'sample.csv file path' INTO TABLE test_table"
 ```
 scala>carbon.sql("SELECT * FROM test_table").show()
 
-scala>carbon.sql("SELECT city, avg(age), sum(age) FROM test_table GROUP BY city").show()
+scala>carbon.sql("SELECT city, avg(age), sum(age)
+                  FROM test_table
+                  GROUP BY city").show()
 ```
 
 ## Interactive Analysis with Spark Shell Version 1.6
@@ -97,7 +108,8 @@ Start Spark shell by running the following command in the Spark directory:
 ```
 ./bin/spark-shell --jars <carbondata assembly jar path>
 ```
-**NOTE**: Assembly jar will be available after [building CarbonData](https://github.com/apache/carbondata/blob/master/build/README.md) and can be copied from `./assembly/target/scala-2.1x/carbondata_xxx.jar`
+**NOTE**: Assembly jar will be available after [building CarbonData](https://github.com/apache/carbondata/
+blob/master/build/README.md) and can be copied from `./assembly/target/scala-2.1x/carbondata_xxx.jar`
 
 **NOTE**: In this shell, SparkContext is readily available as `sc`.
 
@@ -119,7 +131,13 @@ val cc = new CarbonContext(sc, "<hdfs store path>")
 ###### Creating a Table
 
 ```
-scala>cc.sql("CREATE TABLE IF NOT EXISTS test_table (id string, name string, city string, age Int) STORED BY 'carbondata'")
+scala>cc.sql("CREATE TABLE
+              IF NOT EXISTS test_table (
+                         id string,
+                         name string,
+                         city string,
+                         age Int)
+              STORED BY 'carbondata'")
 ```
 To see the table created :
 
@@ -130,7 +148,8 @@ scala>cc.sql("SHOW TABLES").show()
 ###### Loading Data to a Table
 
 ```
-scala>cc.sql("LOAD DATA INPATH 'sample.csv file path' INTO TABLE test_table")
+scala>cc.sql("LOAD DATA INPATH 'sample.csv file path'
+              INTO TABLE test_table")
 ```
 **NOTE**: Please provide the real file path of `sample.csv` for the above script.
 
@@ -138,5 +157,7 @@ scala>cc.sql("LOAD DATA INPATH 'sample.csv file path' INTO TABLE test_table")
 
 ```
 scala>cc.sql("SELECT * FROM test_table").show()
-scala>cc.sql("SELECT city, avg(age), sum(age) FROM test_table GROUP BY city").show()
+scala>cc.sql("SELECT city, avg(age), sum(age)
+              FROM test_table
+              GROUP BY city").show()
 ```

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/troubleshooting.md
----------------------------------------------------------------------
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 27ec8e3..5464997 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -62,11 +62,10 @@ who are building, deploying, and using CarbonData.
 
   2. Use the following command :
 
-    ```
-     "mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package"
-    ```
-
-    Note :  Refrain from using "mvn clean package" without specifying the profile.
+```
+"mvn -Pspark-2.1 -Dspark.version {yourSparkVersion} clean package"
+```
+Note :  Refrain from using "mvn clean package" without specifying the profile.
 
 ## Failed to execute load query on cluster.
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c6f5f34/docs/useful-tips-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/useful-tips-on-carbondata.md b/docs/useful-tips-on-carbondata.md
index bfddf29..40a3947 100644
--- a/docs/useful-tips-on-carbondata.md
+++ b/docs/useful-tips-on-carbondata.md
@@ -175,7 +175,7 @@ excessive memory usage.
 | Parameter | Default Value | Description/Tuning |
 |-----------|-------------|--------|
 |carbon.number.of.cores.while.loading|Default: 2.This value should be >= 2|Specifies the number of cores used for data processing during data loading in CarbonData. |
-|carbon.sort.size|Data loading|Default: 100000. The value should be >= 100.|Threshhold to write local file in sort step when loading data|
+|carbon.sort.size|Default: 100000. The value should be >= 100.|Threshhold to write local file in sort step when loading data|
 |carbon.sort.file.write.buffer.size|Default:  50000.|DataOutputStream buffer. |
 |carbon.number.of.cores.block.sort|Default: 7 | If you have huge memory and cpus, increase it as you will|
 |carbon.merge.sort.reader.thread|Default: 3 |Specifies the number of cores used for temp file merging during data loading in CarbonData.|
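
One way to apply the load-tuning parameters from the table above programmatically (illustrative; the values shown are just the documented defaults, and putting them in carbon.properties works as well):

    import org.apache.carbondata.core.util.CarbonProperties

    val props = CarbonProperties.getInstance()
    props.addProperty("carbon.number.of.cores.while.loading", "2")
    props.addProperty("carbon.sort.size", "100000")
    props.addProperty("carbon.sort.file.write.buffer.size", "50000")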


[41/42] carbondata git commit: Fixed batch load issue

Posted by ra...@apache.org.
Fixed batch load issue


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/02f06fd3
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/02f06fd3
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/02f06fd3

Branch: refs/heads/branch-1.1
Commit: 02f06fd3058ed4bdce1c65e82d80694630c20c82
Parents: 72bbb62
Author: ravipesala <ra...@gmail.com>
Authored: Tue Jun 13 23:13:16 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:39:14 2017 +0530

----------------------------------------------------------------------
 .../newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/02f06fd3/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
index 20fd48b..f1b4a80 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/impl/UnsafeBatchParallelReadMergeSorterImpl.java
@@ -197,9 +197,6 @@ public class UnsafeBatchParallelReadMergeSorterImpl extends AbstractMergeSorter
 
     private void createSortDataRows() {
       int inMemoryChunkSizeInMB = CarbonProperties.getInstance().getSortMemoryChunkSizeInMB();
-      if (inMemoryChunkSizeInMB > sortParameters.getBatchSortSizeinMb()) {
-        inMemoryChunkSizeInMB = sortParameters.getBatchSortSizeinMb();
-      }
       this.finalMerger = new UnsafeSingleThreadFinalSortFilesMerger(sortParameters,
           sortParameters.getTempFileLocation());
       unsafeIntermediateFileMerger = new UnsafeIntermediateMerger(sortParameters);


[25/42] carbondata git commit: Supported IUD for vector reader

Posted by ra...@apache.org.
Supported IUD for vector reader

Fixed comments


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/64f973e8
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/64f973e8
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/64f973e8

Branch: refs/heads/branch-1.1
Commit: 64f973e86b730e6454ef9b6d8a1e50dd6e8a85e5
Parents: ef583af
Author: ravipesala <ra...@gmail.com>
Authored: Wed May 31 20:54:49 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:15:42 2017 +0530

----------------------------------------------------------------------
 .../DictionaryBasedVectorResultCollector.java   |   5 +-
 .../core/scan/result/AbstractScannedResult.java |  25 +++-
 .../scan/result/vector/CarbonColumnVector.java  |   9 ++
 .../scan/result/vector/CarbonColumnarBatch.java |  33 ++++-
 .../dataload/TestBatchSortDataLoad.scala        |  20 +--
 .../iud/UpdateCarbonTableTestCase.scala         |   2 +-
 .../vectorreader/ColumnarVectorWrapper.java     | 130 ++++++++++++++++---
 .../VectorizedCarbonRecordReader.java           |   5 +-
 .../spark/sql/hive/CarbonAnalysisRules.scala    |   8 +-
 .../spark/sql/hive/CarbonSessionState.scala     |   3 +-
 10 files changed, 198 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
index 91afe77..7a8fe06 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
@@ -144,6 +144,8 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
         return;
       }
       fillColumnVectorDetails(columnarBatch, rowCounter, requiredRows);
+      scannedResult.markFilteredRows(
+          columnarBatch, rowCounter, requiredRows, columnarBatch.getRowCounter());
       scanAndFillResult(scannedResult, columnarBatch, rowCounter, availableRows, requiredRows);
     }
   }
@@ -162,7 +164,8 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
       // Or set the row counter.
       scannedResult.setRowCounter(rowCounter + requiredRows);
     }
-    columnarBatch.setActualSize(columnarBatch.getActualSize() + requiredRows);
+    columnarBatch.setActualSize(
+        columnarBatch.getActualSize() + requiredRows - columnarBatch.getRowsFilteredCount());
     columnarBatch.setRowCounter(columnarBatch.getRowCounter() + requiredRows);
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
index e57a290..a1074ea 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
@@ -34,6 +34,7 @@ import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
+import org.apache.carbondata.core.scan.result.vector.CarbonColumnarBatch;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
@@ -283,7 +284,8 @@ public abstract class AbstractScannedResult {
         String data = getBlockletId();
         if (CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID
             .equals(columnVectorInfo.dimension.getColumnName())) {
-          data = data + CarbonCommonConstants.FILE_SEPARATOR + j;
+          data = data + CarbonCommonConstants.FILE_SEPARATOR +
+              (rowMapping == null ? j : rowMapping[pageCounter][j]);
         }
         vector.putBytes(vectorOffset++, offset, data.length(), data.getBytes());
       }
@@ -638,4 +640,25 @@ public abstract class AbstractScannedResult {
       BlockletLevelDeleteDeltaDataCache blockletDeleteDeltaCache) {
     this.blockletDeleteDeltaCache = blockletDeleteDeltaCache;
   }
+
+  /**
+   * Mark the filtered rows in columnar batch. These rows will not be added to vector batches later.
+   * @param columnarBatch
+   * @param startRow
+   * @param size
+   * @param vectorOffset
+   */
+  public void markFilteredRows(CarbonColumnarBatch columnarBatch, int startRow, int size,
+      int vectorOffset) {
+    if (blockletDeleteDeltaCache != null) {
+      int len = startRow + size;
+      for (int i = startRow; i < len; i++) {
+        int rowId = rowMapping != null ? rowMapping[pageCounter][i] : i;
+        if (blockletDeleteDeltaCache.contains(rowId)) {
+          columnarBatch.markFiltered(vectorOffset);
+        }
+        vectorOffset++;
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnVector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnVector.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnVector.java
index 4952e07..a3eb48b 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnVector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnVector.java
@@ -17,10 +17,15 @@
 
 package org.apache.carbondata.core.scan.result.vector;
 
+import org.apache.spark.sql.types.DataType;
 import org.apache.spark.sql.types.Decimal;
 
 public interface CarbonColumnVector {
 
+  void putBoolean(int rowId, boolean value);
+
+  void putFloat(int rowId, float value);
+
   void putShort(int rowId, short value);
 
   void putShorts(int rowId, int count, short value);
@@ -59,4 +64,8 @@ public interface CarbonColumnVector {
 
   void reset();
 
+  DataType getType();
+
+  void setFilteredRowsExist(boolean filteredRowsExist);
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
index faeffde..cfc2f16 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/CarbonColumnarBatch.java
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.core.scan.result.vector;
 
+import java.util.Arrays;
+
 public class CarbonColumnarBatch {
 
   public CarbonColumnVector[] columnVectors;
@@ -27,9 +29,15 @@ public class CarbonColumnarBatch {
 
   private int rowCounter;
 
-  public CarbonColumnarBatch(CarbonColumnVector[] columnVectors, int batchSize) {
+  private boolean[] filteredRows;
+
+  private int rowsFiltered;
+
+  public CarbonColumnarBatch(CarbonColumnVector[] columnVectors, int batchSize,
+      boolean[] filteredRows) {
     this.columnVectors = columnVectors;
     this.batchSize = batchSize;
+    this.filteredRows = filteredRows;
   }
 
   public int getBatchSize() {
@@ -47,6 +55,8 @@ public class CarbonColumnarBatch {
   public void reset() {
     actualSize = 0;
     rowCounter = 0;
+    rowsFiltered = 0;
+    Arrays.fill(filteredRows, false);
     for (int i = 0; i < columnVectors.length; i++) {
       columnVectors[i].reset();
     }
@@ -59,4 +69,25 @@ public class CarbonColumnarBatch {
   public void setRowCounter(int rowCounter) {
     this.rowCounter = rowCounter;
   }
+
+  /**
+   * Mark the rows as filtered before filling the batch, so that these rows will not be added
+   * to vector batches.
+   * @param rowId
+   */
+  public void markFiltered(int rowId) {
+    if (!filteredRows[rowId]) {
+      filteredRows[rowId] = true;
+      rowsFiltered++;
+    }
+    if (rowsFiltered == 1) {
+      for (int i = 0; i < columnVectors.length; i++) {
+        columnVectors[i].setFilteredRowsExist(true);
+      }
+    }
+  }
+
+  public int getRowsFilteredCount() {
+    return rowsFiltered;
+  }
 }
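
A simplified, self-contained sketch (not the actual CarbonColumnarBatch class) of the bookkeeping added above: each deleted row is flagged once, a running count is kept, and the batch's actual size is reduced accordingly:

    // Simplified model of markFiltered / getRowsFilteredCount.
    class FilteredBatch(batchSize: Int) {
      private val filteredRows = new Array[Boolean](batchSize)
      private var rowsFiltered = 0

      def markFiltered(rowId: Int): Unit = {
        if (!filteredRows(rowId)) {      // flag each deleted row only once
          filteredRows(rowId) = true
          rowsFiltered += 1
        }
      }

      // Rows actually exposed to the query = rows produced minus rows deleted.
      def actualSize(rowsProduced: Int): Int = rowsProduced - rowsFiltered
    }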

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
index 43bcac8..d53b5e5 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
@@ -36,7 +36,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
     val writer = new BufferedWriter(new FileWriter(file))
     writer.write("c1,c2,c3, c4, c5, c6, c7, c8, c9, c10")
     writer.newLine()
-    for(i <- 0 until 200000) {
+    for(i <- 0 until 100000) {
       writer.write("a" + i%1000 + "," +
                    "b" + i%1000 + "," +
                    "c" + i%1000 + "," +
@@ -84,9 +84,9 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
         s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
 
-    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(200000)))
+    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load1") == 10, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load1") == 5, "Something wrong in batch sort")
   }
 
   test("test batch sort load by passing option to load command and compare with normal load") {
@@ -115,7 +115,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
         s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
     sql("alter table carbon_load1 compact 'major'")
     Thread.sleep(4000)
-    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(800000)))
+    checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(400000)))
 
     assert(getIndexfileCount("carbon_load1", "0.1") == 1, "Something wrong in compaction after batch sort")
 
@@ -137,7 +137,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
         s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 ")
 
-    checkAnswer(sql("select count(*) from carbon_load5"), Seq(Row(800000)))
+    checkAnswer(sql("select count(*) from carbon_load5"), Seq(Row(400000)))
 
     checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
       sql("select * from carbon_load5 where c1='a1' order by c1"))
@@ -165,9 +165,9 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load3 " +
         s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1', 'single_pass'='true')")
 
-    checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(200000)))
+    checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load3") == 10, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load3") == 5, "Something wrong in batch sort")
 
     checkAnswer(sql("select * from carbon_load3 where c1='a1' order by c1"),
       sql("select * from carbon_load2 where c1='a1' order by c1"))
@@ -186,9 +186,9 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load4 " )
 
-    checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(200000)))
+    checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load4") == 10, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load4") == 5, "Something wrong in batch sort")
     CarbonProperties.getInstance().
       addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
         CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
@@ -206,7 +206,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load6 " )
 
-    checkAnswer(sql("select count(*) from carbon_load6"), Seq(Row(200000)))
+    checkAnswer(sql("select count(*) from carbon_load6"), Seq(Row(100000)))
 
     assert(getIndexfileCount("carbon_load6") == 1, "Something wrong in batch sort")
     CarbonProperties.getInstance().

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index 25fe91b..7917b61 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -42,7 +42,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "false")
+      .addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER , "true")
   }
 
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
index f94c0b2..c3d2a87 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
@@ -20,78 +20,165 @@ package org.apache.carbondata.spark.vectorreader;
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 
 import org.apache.spark.sql.execution.vectorized.ColumnVector;
+import org.apache.spark.sql.types.DataType;
 import org.apache.spark.sql.types.Decimal;
 
 class ColumnarVectorWrapper implements CarbonColumnVector {
 
   private ColumnVector columnVector;
 
-  public ColumnarVectorWrapper(ColumnVector columnVector) {
+  private boolean[] filteredRows;
+
+  private int counter;
+
+  private boolean filteredRowsExist;
+
+  public ColumnarVectorWrapper(ColumnVector columnVector, boolean[] filteredRows) {
     this.columnVector = columnVector;
+    this.filteredRows = filteredRows;
+  }
+
+  @Override public void putBoolean(int rowId, boolean value) {
+    if (!filteredRows[rowId]) {
+      columnVector.putBoolean(counter++, value);
+    }
+  }
+
+  @Override public void putFloat(int rowId, float value) {
+    if (!filteredRows[rowId]) {
+      columnVector.putFloat(counter++, value);
+    }
   }
 
   @Override public void putShort(int rowId, short value) {
-    columnVector.putShort(rowId, value);
+    if (!filteredRows[rowId]) {
+      columnVector.putShort(counter++, value);
+    }
   }
 
   @Override public void putShorts(int rowId, int count, short value) {
-    columnVector.putShorts(rowId, count, value);
+    if (filteredRowsExist) {
+      for (int i = 0; i < count; i++) {
+        if (!filteredRows[rowId]) {
+          putShort(counter++, value);
+        }
+        rowId++;
+      }
+    } else {
+      columnVector.putShorts(rowId, count, value);
+    }
   }
 
   @Override public void putInt(int rowId, int value) {
-    columnVector.putInt(rowId, value);
+    if (!filteredRows[rowId]) {
+      columnVector.putInt(counter++, value);
+    }
   }
 
   @Override public void putInts(int rowId, int count, int value) {
-    columnVector.putInts(rowId, count, value);
+    if (filteredRowsExist) {
+      for (int i = 0; i < count; i++) {
+        if (!filteredRows[rowId]) {
+          putInt(counter++, value);
+        }
+        rowId++;
+      }
+    } else {
+      columnVector.putInts(rowId, count, value);
+    }
   }
 
   @Override public void putLong(int rowId, long value) {
-    columnVector.putLong(rowId, value);
+    if (!filteredRows[rowId]) {
+      columnVector.putLong(counter++, value);
+    }
   }
 
   @Override public void putLongs(int rowId, int count, long value) {
-    columnVector.putLongs(rowId, count, value);
+    if (filteredRowsExist) {
+      for (int i = 0; i < count; i++) {
+        if (!filteredRows[rowId]) {
+          putLong(counter++, value);
+        }
+        rowId++;
+      }
+    } else {
+      columnVector.putLongs(rowId, count, value);
+    }
   }
 
   @Override public void putDecimal(int rowId, Decimal value, int precision) {
-    columnVector.putDecimal(rowId, value, precision);
+    if (!filteredRows[rowId]) {
+      columnVector.putDecimal(counter++, value, precision);
+    }
   }
 
   @Override public void putDecimals(int rowId, int count, Decimal value, int precision) {
     for (int i = 0; i < count; i++) {
-      putDecimal(rowId++, value, precision);
+      if (!filteredRows[rowId]) {
+        putDecimal(counter++, value, precision);
+      }
+      rowId++;
     }
   }
 
   @Override public void putDouble(int rowId, double value) {
-    columnVector.putDouble(rowId, value);
+    if (!filteredRows[rowId]) {
+      columnVector.putDouble(counter++, value);
+    }
   }
 
   @Override public void putDoubles(int rowId, int count, double value) {
-    columnVector.putDoubles(rowId, count, value);
+    if (filteredRowsExist) {
+      for (int i = 0; i < count; i++) {
+        if (!filteredRows[rowId]) {
+          putDouble(counter++, value);
+        }
+        rowId++;
+      }
+    } else {
+      columnVector.putDoubles(rowId, count, value);
+    }
   }
 
   @Override public void putBytes(int rowId, byte[] value) {
-    columnVector.putByteArray(rowId, value);
+    if (!filteredRows[rowId]) {
+      columnVector.putByteArray(counter++, value);
+    }
   }
 
   @Override public void putBytes(int rowId, int count, byte[] value) {
     for (int i = 0; i < count; i++) {
-      putBytes(rowId++, value);
+      if (!filteredRows[rowId]) {
+        putBytes(counter++, value);
+      }
+      rowId++;
     }
   }
 
   @Override public void putBytes(int rowId, int offset, int length, byte[] value) {
-    columnVector.putByteArray(rowId, value, offset, length);
+    if (!filteredRows[rowId]) {
+      columnVector.putByteArray(counter++, value, offset, length);
+    }
   }
 
   @Override public void putNull(int rowId) {
-    columnVector.putNull(rowId);
+    if (!filteredRows[rowId]) {
+      columnVector.putNull(counter++);
+    }
   }
 
   @Override public void putNulls(int rowId, int count) {
-    columnVector.putNulls(rowId, count);
+    if (filteredRowsExist) {
+      for (int i = 0; i < count; i++) {
+        if (!filteredRows[rowId]) {
+          putNull(counter++);
+        }
+        rowId++;
+      }
+    } else {
+      columnVector.putNulls(rowId, count);
+    }
   }
 
   @Override public boolean isNull(int rowId) {
@@ -108,6 +195,15 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
   }
 
   @Override public void reset() {
-//    columnVector.reset();
+    counter = 0;
+    filteredRowsExist = false;
+  }
+
+  @Override public DataType getType() {
+    return columnVector.dataType();
+  }
+
+  @Override public void setFilteredRowsExist(boolean filteredRowsExist) {
+    this.filteredRowsExist = filteredRowsExist;
   }
 }
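
A stripped-down sketch (not the real ColumnarVectorWrapper) of the compaction idea above: the incoming rowId indexes the source data while a separate counter indexes the output vector, so filtered rows leave no gaps:

    // Simplified: copy values into an output array, skipping rows flagged as filtered.
    class CompactingVector(filteredRows: Array[Boolean], capacity: Int) {
      private val out = new Array[Int](capacity)
      private var counter = 0              // next write position in the output

      def putInt(rowId: Int, value: Int): Unit = {
        if (!filteredRows(rowId)) {        // deleted rows are never written
          out(counter) = value
          counter += 1
        }
      }

      def reset(): Unit = { counter = 0 }  // mirrors ColumnarVectorWrapper.reset()
    }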

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
index 3fdf9af..173c527 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
@@ -219,10 +219,11 @@ class VectorizedCarbonRecordReader extends AbstractRecordReader<Object> {
 
     columnarBatch = ColumnarBatch.allocate(new StructType(fields), memMode);
     CarbonColumnVector[] vectors = new CarbonColumnVector[fields.length];
+    boolean[] filteredRows = new boolean[columnarBatch.capacity()];
     for (int i = 0; i < fields.length; i++) {
-      vectors[i] = new ColumnarVectorWrapper(columnarBatch.column(i));
+      vectors[i] = new ColumnarVectorWrapper(columnarBatch.column(i), filteredRows);
     }
-    carbonColumnarBatch = new CarbonColumnarBatch(vectors, columnarBatch.capacity());
+    carbonColumnarBatch = new CarbonColumnarBatch(vectors, columnarBatch.capacity(), filteredRows);
   }
 
   private void initBatch() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
index 0fb5c47..c9fc46c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
@@ -79,13 +79,7 @@ object CarbonPreInsertionCasts extends Rule[LogicalPlan] {
   }
 }
 
-object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
-
-  var sparkSession: SparkSession = _
-
-  def init(sparkSession: SparkSession) {
-     this.sparkSession = sparkSession
-  }
+case class CarbonIUDAnalysisRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
 
   private val parser = new SparkSqlParser(sparkSession.sessionState.conf)
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/64f973e8/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
index e413840..156a12e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
@@ -67,7 +67,6 @@ class CarbonSessionCatalog(
   lazy val carbonEnv = {
     val env = new CarbonEnv
     env.init(sparkSession)
-    CarbonIUDAnalysisRule.init(sparkSession)
     env
   }
 
@@ -130,7 +129,7 @@ class CarbonSessionState(sparkSession: SparkSession) extends HiveSessionState(sp
         catalog.ParquetConversions ::
         catalog.OrcConversions ::
         CarbonPreInsertionCasts ::
-        CarbonIUDAnalysisRule ::
+        CarbonIUDAnalysisRule(sparkSession) ::
         AnalyzeCreateTable(sparkSession) ::
         PreprocessTableInsertion(conf) ::
         DataSourceAnalysis(conf) ::


[17/42] carbondata git commit: Update dml-operation-on-carbondata.md

Posted by ra...@apache.org.
Update dml-operation-on-carbondata.md


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3db55843
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3db55843
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3db55843

Branch: refs/heads/branch-1.1
Commit: 3db55843aa15e809645250623140419c47fe263f
Parents: e67003c
Author: chenerlu <ch...@huawei.com>
Authored: Tue May 9 23:18:02 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:47 2017 +0530

----------------------------------------------------------------------
 docs/dml-operation-on-carbondata.md | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3db55843/docs/dml-operation-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/dml-operation-on-carbondata.md b/docs/dml-operation-on-carbondata.md
index f9d9f45..e315468 100644
--- a/docs/dml-operation-on-carbondata.md
+++ b/docs/dml-operation-on-carbondata.md
@@ -340,6 +340,8 @@ SET (column_name1, column_name2,) =
 | sourceColumn | The source table column values to be updated in destination table. |
 | sourceTable | The table from which the records are updated into destination Carbon table. |
 
+NOTE: This functionality is currently not supported in Spark 2.x and will be supported soon.  
+
 ### Usage Guidelines
 The following conditions must be met for successful updation :
 
@@ -413,6 +415,7 @@ DELETE FROM table_name [WHERE expression];
 |--------------|-----------------------------------------------------------------------|
 | table_name | The name of the Carbon table in which you want to perform the delete. |
 
+NOTE: This functionality is currently not supported in Spark 2.x and will be supported soon.  
 
 ### Examples
 

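For reference, a hedged sketch of the statements this doc change annotates, in the sql(...) style used by the tests in this branch; table, column, and value names are illustrative, and per the NOTE above these statements are not yet supported on Spark 2.x:

    // Update a measure using the documented SET (column) = (expression) form.
    sql("UPDATE dest_table SET (salary) = (salary + 1000) WHERE id = 4")

    // Delete the matching rows.
    sql("DELETE FROM dest_table WHERE id = 4")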

[35/42] carbondata git commit: [CARBONDATA-1077] ColumnDict and ALL_DICTIONARY_PATH must be used with SINGLE_PASS='true'

Posted by ra...@apache.org.
[CARBONDATA-1077] ColumnDict and ALL_DICTIONARY_PATH must be used with SINGLE_PASS='true'


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fcb20924
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fcb20924
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fcb20924

Branch: refs/heads/branch-1.1
Commit: fcb20924fab8086e224439a9bb1e5be8af44b26b
Parents: 5b66732
Author: mohammadshahidkhan <mo...@gmail.com>
Authored: Mon May 22 18:34:14 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:26:29 2017 +0530

----------------------------------------------------------------------
 .../dataload/TestLoadDataUseAllDictionary.scala |  2 +-
 .../predefdic/TestPreDefDictionary.scala        | 44 +++++++++++++++++++-
 .../execution/command/carbonTableSchema.scala   |  7 +++-
 .../util/ExternalColumnDictionaryTestCase.scala |  4 +-
 .../execution/command/carbonTableSchema.scala   |  7 +++-
 .../util/ExternalColumnDictionaryTestCase.scala |  4 +-
 6 files changed, 58 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
index 22cf8f7..d6deb89 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala
@@ -38,7 +38,7 @@ class TestLoadDataUseAllDictionary extends QueryTest with BeforeAndAfterAll{
       sql(s"""
            LOAD DATA LOCAL INPATH '$resourcesPath/source_without_header.csv' into table t3
            options('FILEHEADER'='id,date,country,name,phonetype,serialname,salary',
-           'All_DICTIONARY_PATH'='$resourcesPath/dict.txt')
+           'All_DICTIONARY_PATH'='$resourcesPath/dict.txt','single_pass'='true')
            """)
       assert(false)
     } catch {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
index 69af708..ca117c2 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
@@ -49,7 +49,7 @@ class TestPreDefDictionary extends QueryTest with BeforeAndAfterAll {
        STORED BY 'carbondata'""")
     sql(
       s"""LOAD DATA LOCAL INPATH '$testData' into table predefdictable
-           options('ALL_DICTIONARY_PATH'='$allDictFile')""")
+           options('ALL_DICTIONARY_PATH'='$allDictFile','single_pass'='true')""")
     checkAnswer(
       sql("select phonetype from predefdictable where phonetype='phone197'"),
       Seq(Row("phone197"))
@@ -89,9 +89,51 @@ class TestPreDefDictionary extends QueryTest with BeforeAndAfterAll {
       Seq(Row("phone197"))
     )
   }
+
+  test("validation test columndict with single_pass= false.") {
+    val csvFilePath = s"$resourcesPath/nullvalueserialization.csv"
+    val testData = s"$resourcesPath/predefdic/data3.csv"
+    val csvHeader = "ID,phonetype"
+    val dicFilePath = s"$resourcesPath/predefdic/dicfilepath.csv"
+    sql(
+      """CREATE TABLE IF NOT EXISTS columndicValidationTable (ID Int, phonetype String)
+       STORED BY 'carbondata'""")
+    try {
+      sql(
+        s"""LOAD DATA LOCAL INPATH '$testData' into table columndicValidationTable
+           options('COLUMNDICT'='phonetype:$dicFilePath', 'SINGLE_PASS'='false')""")
+    } catch {
+      case x: Throwable =>
+        val failMess: String = "Can not use all_dictionary_path or columndict without single_pass."
+        assert(failMess.equals(x.getMessage))
+    }
+  }
+
+  test("validation test ALL_DICTIONARY_PATH with single_pass= false.") {
+    val csvFilePath = s"$resourcesPath/nullvalueserialization.csv"
+    val testData = s"$resourcesPath/predefdic/data3.csv"
+    val csvHeader = "ID,phonetype"
+    val allDictFile = s"$resourcesPath/predefdic/allpredefdictionary.csv"
+    sql(
+      """CREATE TABLE IF NOT EXISTS predefdictableval (ID Int, phonetype String)
+       STORED BY 'carbondata'""")
+    try {
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$testData' into table predefdictableval
+           options('ALL_DICTIONARY_PATH'='$allDictFile', 'SINGLE_PASS'='false')""")
+    } catch {
+      case x: Throwable =>
+        val failMess: String = "Can not use all_dictionary_path or columndict without single_pass."
+        assert(failMess.equals(x.getMessage))
+    }
+  }
+
   override def afterAll {
     sql("DROP TABLE IF EXISTS predefdictable")
     sql("DROP TABLE IF EXISTS predefdictable1")
     sql("DROP TABLE IF EXISTS columndicTable")
+    sql("DROP TABLE IF EXISTS columndicValidationTable")
+    sql("DROP TABLE IF EXISTS predefdictableval")
+
   }
 }
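
Modeled on the tests above, a sketch of the accepted combination after this change (the corresponding validation in carbonTableSchema.scala follows below); the file paths are placeholders:

    // COLUMNDICT (or ALL_DICTIONARY_PATH) now requires SINGLE_PASS='true'.
    // With 'single_pass'='false' the load is rejected with
    // "Can not use all_dictionary_path or columndict without single_pass."
    sql("""LOAD DATA LOCAL INPATH '/tmp/data3.csv' INTO TABLE columndicTable
           OPTIONS('COLUMNDICT'='phonetype:/tmp/dicfilepath.csv', 'SINGLE_PASS'='true')""")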

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 7258511..1c1adc1 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -438,8 +438,11 @@ case class LoadTable(
         case "true" =>
           true
         case "false" =>
-          if (!StringUtils.isEmpty(allDictionaryPath)) {
-            true
+          // when single_pass = false and either all_dictionary_path
+          // or columndict is configured, do not allow the load
+          if (StringUtils.isNotEmpty(allDictionaryPath) || StringUtils.isNotEmpty(columnDict)) {
+            throw new MalformedCarbonCommandException(
+              "Can not use all_dictionary_path or columndict without single_pass.")
           } else {
             false
           }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
index 05b94ee..4505429 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
@@ -204,7 +204,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
     try {
       sql(s"""
       LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1')
+      OPTIONS('single_pass'='true','FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1')
         """)
     } catch {
       case ex: Exception =>
@@ -234,7 +234,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
     try {
       sql(s"""
       LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
+      OPTIONS('single_pass'='true','FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
       """)
       assert(false)
     } catch {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 8818c6b..530c4cb 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -448,8 +448,11 @@ case class LoadTable(
         case "true" =>
           true
         case "false" =>
-          if (!StringUtils.isEmpty(allDictionaryPath)) {
-            true
+          // when single_pass = false and either all_dictionary_path
+          // or columndict is configured, do not allow the load
+          if (StringUtils.isNotEmpty(allDictionaryPath) || StringUtils.isNotEmpty(columnDict)) {
+            throw new MalformedCarbonCommandException(
+              "Can not use all_dictionary_path or columndict without single_pass.")
           } else {
             false
           }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fcb20924/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
index 10f99b7..1c16ea4 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
@@ -231,7 +231,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
       sql(
         s"""
       LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1')
+      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1', 'single_pass'='true')
         """)
     } catch {
       case ex: Exception =>
@@ -264,7 +264,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
       sql(
         s"""
       LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
+      OPTIONS('single_pass'='true','FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
       """)
       assert(false)
     } catch {


[07/42] carbondata git commit: updated null check for right expression in NOT IN expression

Posted by ra...@apache.org.
updated null check for right expression in NOT IN expression


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/93f7f966
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/93f7f966
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/93f7f966

Branch: refs/heads/branch-1.1
Commit: 93f7f96674ce25bf83b13bb7947178ce52115420
Parents: fd50ad2
Author: kunal642 <ku...@knoldus.in>
Authored: Thu May 18 13:48:34 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:44:54 2017 +0530

----------------------------------------------------------------------
 .../expression/conditional/NotInExpression.java | 22 ++++++++++----------
 .../ExpressionWithNullTestCase.scala            |  2 ++
 2 files changed, 13 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/93f7f966/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
index 2552f96..67e3a50 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
@@ -42,6 +42,17 @@ public class NotInExpression extends BinaryConditionalExpression {
     if (setOfExprResult == null) {
       ExpressionResult val = null;
       ExpressionResult rightRsult = right.evaluate(value);
+      // Both the left and right results need to be checked for null because NotInExpression is
+      // basically an AND operation on the list of predicates that are provided.
+      // Example: x not in (1,2,null) is evaluated as x <> 1 AND x <> 2 AND x <> null.
+      // If any of the predicates is null then the result is unknown for all the predicates, thus
+      // we will return false for each of them.
+      for (ExpressionResult expressionResult: rightRsult.getList()) {
+        if (expressionResult.isNull() || leftRsult.isNull()) {
+          leftRsult.set(DataType.BOOLEAN, false);
+          return leftRsult;
+        }
+      }
       setOfExprResult = new HashSet<ExpressionResult>(10);
       for (ExpressionResult exprResVal : rightRsult.getList()) {
         if (exprResVal.getDataType().getPrecedenceOrder() < leftRsult.getDataType()
@@ -80,17 +91,6 @@ public class NotInExpression extends BinaryConditionalExpression {
         setOfExprResult.add(val);
       }
     }
-    // Both left and right results need to be checked for null because NotInExpression is basically
-    // an And Operation on the list of predicates that are provided.
-    // Example: x in (1,2,null) would be converted to x=1 AND x=2 AND x=null.
-    // If any of the predicates is null then the result is unknown for all the predicates thus
-    // we will return false for each of them.
-    for (ExpressionResult expressionResult: setOfExprResult) {
-      if (expressionResult.isNull() || leftRsult.isNull()) {
-        leftRsult.set(DataType.BOOLEAN, false);
-        return leftRsult;
-      }
-    }
     leftRsult.set(DataType.BOOLEAN, !setOfExprResult.contains(leftRsult));
     return leftRsult;
   }
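
The comment above amounts to standard SQL three-valued logic; a short illustration in the style of the test case below (table name taken from that test):

    // If the NOT IN list contains NULL (or the left value is NULL), the predicate is
    // unknown for every row, so the filter matches nothing.
    sql("select * from expression_test where id not in (1, 2, NULL)")   // returns no rows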

http://git-wip-us.apache.org/repos/asf/carbondata/blob/93f7f966/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ExpressionWithNullTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ExpressionWithNullTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ExpressionWithNullTestCase.scala
index a421c7e..cbc2750 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ExpressionWithNullTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ExpressionWithNullTestCase.scala
@@ -38,6 +38,7 @@ class ExpressionWithNullTestCase extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test to check in expression with null values") {
+    checkAnswer(sql("select * from expression_test where id in (1,2,'', NULL, ' ')"), sql("select * from expression_test_hive where id in (1,2,' ', NULL, ' ')"))
     checkAnswer(sql("select * from expression_test where id in (1,2,'')"), sql("select * from expression_test_hive where id in (1,2,'')"))
     checkAnswer(sql("select * from expression_test where id in ('')"), sql("select * from expression_test_hive where id in ('')"))
     checkAnswer(sql("select * from expression_test where number in (null)"), sql("select * from expression_test_hive where number in (null)"))
@@ -54,6 +55,7 @@ class ExpressionWithNullTestCase extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test to check not in expression with null values") {
+    checkAnswer(sql("select * from expression_test where id not in (1,2,'', NULL, ' ')"), sql("select * from expression_test_hive where id not in (1,2,' ', NULL, ' ')"))
     checkAnswer(sql("select * from expression_test where id not in (1,2,'')"), sql("select * from expression_test_hive where id not in (1,2,'')"))
     checkAnswer(sql("select * from expression_test where id not in ('')"), sql("select * from expression_test_hive where id not in ('')"))
     checkAnswer(sql("select * from expression_test where number not in (null)"), sql("select * from expression_test_hive where number not in (null)"))


[06/42] carbondata git commit: [CARBONDATA-1061] If ALL_DICTIONARY_PATH is used in the load options then SINGLE_PASS must be used.

Posted by ra...@apache.org.
[CARBONDATA-1061] If ALL_DICTIONARY_PATH is used in the load options then SINGLE_PASS must be used.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fd50ad26
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fd50ad26
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fd50ad26

Branch: refs/heads/branch-1.1
Commit: fd50ad26eee98257ec258da5e32726828b54b071
Parents: 8c5540d
Author: mohammadshahidkhan <mo...@gmail.com>
Authored: Wed May 17 19:35:54 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:44:43 2017 +0530

----------------------------------------------------------------------
 .../resources/predefdic/allpredefdictionary.csv |   3 +
 .../src/test/resources/predefdic/data3.csv      |   4 +
 .../test/resources/predefdic/dicfilepath.csv    |   2 +
 .../predefdic/TestPreDefDictionary.scala        |  97 +++++++
 .../spark/util/GlobalDictionaryUtil.scala       |  78 +++--
 .../execution/command/carbonTableSchema.scala   |  45 +--
 .../execution/command/carbonTableSchema.scala   |  46 +--
 .../spark/util/AllDictionaryTestCase.scala      | 160 +++++++++++
 .../spark/util/DictionaryTestCaseUtil.scala     |  49 ++++
 .../util/ExternalColumnDictionaryTestCase.scala | 284 +++++++++++++++++++
 10 files changed, 699 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark-common-test/src/test/resources/predefdic/allpredefdictionary.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/predefdic/allpredefdictionary.csv b/integration/spark-common-test/src/test/resources/predefdic/allpredefdictionary.csv
new file mode 100644
index 0000000..27152ca
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/predefdic/allpredefdictionary.csv
@@ -0,0 +1,3 @@
+1,phone756
+1,phonetype
+1,phone757

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark-common-test/src/test/resources/predefdic/data3.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/predefdic/data3.csv b/integration/spark-common-test/src/test/resources/predefdic/data3.csv
new file mode 100644
index 0000000..c84506a
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/predefdic/data3.csv
@@ -0,0 +1,4 @@
+ID,phonetype
+1,phone197
+2,phone756
+3,phone757
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark-common-test/src/test/resources/predefdic/dicfilepath.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/predefdic/dicfilepath.csv b/integration/spark-common-test/src/test/resources/predefdic/dicfilepath.csv
new file mode 100644
index 0000000..e49a020
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/predefdic/dicfilepath.csv
@@ -0,0 +1,2 @@
+phone756
+phone757

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
new file mode 100644
index 0000000..69af708
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/predefdic/TestPreDefDictionary.scala
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.predefdic
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * Test cases for data loading with predefined dictionaries (ALL_DICTIONARY_PATH and COLUMNDICT)
+ */
+class TestPreDefDictionary extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS predefdictable")
+    sql("DROP TABLE IF EXISTS predefdictable1")
+    sql("DROP TABLE IF EXISTS columndicTable")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT
+      )
+  }
+
+  test("dictionary value not present in the allpredefdictionary dic file must be loaded.") {
+    val csvFilePath = s"$resourcesPath/nullvalueserialization.csv"
+    val testData = s"$resourcesPath/predefdic/data3.csv"
+    val csvHeader = "ID,phonetype"
+    val allDictFile = s"$resourcesPath/predefdic/allpredefdictionary.csv"
+    sql(
+      """CREATE TABLE IF NOT EXISTS predefdictable (ID Int, phonetype String)
+       STORED BY 'carbondata'""")
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$testData' into table predefdictable
+           options('ALL_DICTIONARY_PATH'='$allDictFile')""")
+    checkAnswer(
+      sql("select phonetype from predefdictable where phonetype='phone197'"),
+      Seq(Row("phone197"))
+    )
+  }
+
+  test("dictionary value not present in the allpredefdictionary dic with single_pass.") {
+    val csvFilePath = s"$resourcesPath/nullvalueserialization.csv"
+    val testData = s"$resourcesPath/predefdic/data3.csv"
+    val csvHeader = "ID,phonetype"
+    val allDictFile = s"$resourcesPath/predefdic/allpredefdictionary.csv"
+    sql(
+      """CREATE TABLE IF NOT EXISTS predefdictable1 (ID Int, phonetype String)
+       STORED BY 'carbondata'""")
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$testData' into table predefdictable1
+           options('ALL_DICTIONARY_PATH'='$allDictFile', 'SINGLE_PASS'='true')""")
+    checkAnswer(
+      sql("select phonetype from predefdictable1 where phonetype='phone197'"),
+      Seq(Row("phone197"))
+    )
+  }
+
+  test("dictionary value not present in the columndict dic with single_pass.") {
+    val csvFilePath = s"$resourcesPath/nullvalueserialization.csv"
+    val testData = s"$resourcesPath/predefdic/data3.csv"
+    val csvHeader = "ID,phonetype"
+    val dicFilePath = s"$resourcesPath/predefdic/dicfilepath.csv"
+    sql(
+      """CREATE TABLE IF NOT EXISTS columndicTable (ID Int, phonetype String)
+       STORED BY 'carbondata'""")
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$testData' into table columndicTable
+           options('COLUMNDICT'='phonetype:$dicFilePath', 'SINGLE_PASS'='true')""")
+    checkAnswer(
+      sql("select phonetype from columndicTable where phonetype='phone197'"),
+      Seq(Row("phone197"))
+    )
+  }
+  override def afterAll {
+    sql("DROP TABLE IF EXISTS predefdictable")
+    sql("DROP TABLE IF EXISTS predefdictable1")
+    sql("DROP TABLE IF EXISTS columndicTable")
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index f690eef..549bdf9 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -750,37 +750,13 @@ object GlobalDictionaryUtil {
           LOGGER.info("No column found for generating global dictionary in source data files")
         }
       } else {
-        LOGGER.info("Generate global dictionary from dictionary files!")
-        val isNonempty = validateAllDictionaryPath(allDictionaryPath)
-        if (isNonempty) {
-          var headers = carbonLoadModel.getCsvHeaderColumns
-          headers = headers.map(headerName => headerName.trim)
-          // prune columns according to the CSV file header, dimension columns
-          val (requireDimension, requireColumnNames) = pruneDimensions(dimensions, headers, headers)
-          if (requireDimension.nonEmpty) {
-            val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
-              requireDimension, storePath, dictfolderPath, false)
-            // check if dictionary files contains bad record
-            val accumulator = sqlContext.sparkContext.accumulator(0)
-            // read local dictionary file, and group by key
-            val allDictionaryRdd = readAllDictionaryFiles(sqlContext, headers,
-              requireColumnNames, allDictionaryPath, accumulator)
-            // read exist dictionary and combine
-            val inputRDD = new CarbonAllDictionaryCombineRDD(allDictionaryRdd, model)
-              .partitionBy(new ColumnPartitioner(model.primDimensions.length))
-            // generate global dictionary files
-            val statusList = new CarbonGlobalDictionaryGenerateRDD(inputRDD, model).collect()
-            // check result status
-            checkStatus(carbonLoadModel, sqlContext, model, statusList)
-            // if the dictionary contains wrong format record, throw ex
-            if (accumulator.value > 0) {
-              throw new DataLoadingException("Data Loading failure, dictionary values are " +
-                                             "not in correct format!")
-            }
-          } else {
-            LOGGER.info("have no column need to generate global dictionary")
-          }
-        }
+        generateDictionaryFromDictionaryFiles(sqlContext,
+          carbonLoadModel,
+          storePath,
+          carbonTableIdentifier,
+          dictfolderPath,
+          dimensions,
+          allDictionaryPath)
       }
     } catch {
       case ex: Exception =>
@@ -796,6 +772,46 @@ object GlobalDictionaryUtil {
     }
   }
 
+  def generateDictionaryFromDictionaryFiles(sqlContext: SQLContext,
+      carbonLoadModel: CarbonLoadModel,
+      storePath: String,
+      carbonTableIdentifier: CarbonTableIdentifier,
+      dictfolderPath: String,
+      dimensions: Array[CarbonDimension],
+      allDictionaryPath: String): Unit = {
+    LOGGER.info("Generate global dictionary from dictionary files!")
+    val isNonempty = validateAllDictionaryPath(allDictionaryPath)
+    if (isNonempty) {
+      var headers = carbonLoadModel.getCsvHeaderColumns
+      headers = headers.map(headerName => headerName.trim)
+      // prune columns according to the CSV file header, dimension columns
+      val (requireDimension, requireColumnNames) = pruneDimensions(dimensions, headers, headers)
+      if (requireDimension.nonEmpty) {
+        val model = createDictionaryLoadModel(carbonLoadModel, carbonTableIdentifier,
+          requireDimension, storePath, dictfolderPath, false)
+        // check if dictionary files contains bad record
+        val accumulator = sqlContext.sparkContext.accumulator(0)
+        // read local dictionary file, and group by key
+        val allDictionaryRdd = readAllDictionaryFiles(sqlContext, headers,
+          requireColumnNames, allDictionaryPath, accumulator)
+        // read exist dictionary and combine
+        val inputRDD = new CarbonAllDictionaryCombineRDD(allDictionaryRdd, model)
+          .partitionBy(new ColumnPartitioner(model.primDimensions.length))
+        // generate global dictionary files
+        val statusList = new CarbonGlobalDictionaryGenerateRDD(inputRDD, model).collect()
+        // check result status
+        checkStatus(carbonLoadModel, sqlContext, model, statusList)
+        // if the dictionary contains wrong format record, throw ex
+        if (accumulator.value > 0) {
+          throw new DataLoadingException("Data Loading failure, dictionary values are " +
+                                         "not in correct format!")
+        }
+      } else {
+        LOGGER.info("have no column need to generate global dictionary")
+      }
+    }
+  }
+
   // Get proper error message of TextParsingException
   def trimErrorMessage(input: String): String = {
     var errorMessage: String = null

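For context, the extracted helper above is ultimately driven by the ALL_DICTIONARY_PATH load option. A typical invocation from SQL, in the style of the test cases added later in this change, would look roughly like this (table name and dictionary path are illustrative only):

    // illustrative load that exercises the all-dictionary code path
    sql(
      s"""
         |LOAD DATA LOCAL INPATH '$resourcesPath/sample.csv' INTO TABLE sample
         |OPTIONS('SINGLE_PASS'='true',
         |'ALL_DICTIONARY_PATH'='$resourcesPath/alldictionary/sample/20160423/1400_1405/*.dictionary')
       """.stripMargin)
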
http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 9745ddd..ac51fa0 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -53,7 +53,7 @@ import org.apache.carbondata.processing.model.{CarbonDataLoadSchema, CarbonLoadM
 import org.apache.carbondata.processing.newflow.constants.DataLoadProcessorConstants
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.rdd.{CarbonDataRDDFactory, DataManagementFunc, DictionaryLoadModel}
-import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil, GlobalDictionaryUtil}
+import org.apache.carbondata.spark.util.{CommonUtil, GlobalDictionaryUtil}
 
 object Checker {
   def validateTableExists(
@@ -425,18 +425,16 @@ case class LoadTable(
       // when single_pass=true, and not use all dict
       val useOnePass = options.getOrElse("single_pass", "false").trim.toLowerCase match {
         case "true" =>
-          if (StringUtils.isEmpty(allDictionaryPath)) {
+          true
+        case "false" =>
+          if (!StringUtils.isEmpty(allDictionaryPath)) {
             true
           } else {
-            LOGGER.error("Can't use single_pass, because SINGLE_PASS and ALL_DICTIONARY_PATH" +
-              "can not be used together")
             false
           }
-        case "false" =>
-          false
         case illegal =>
           LOGGER.error(s"Can't use single_pass, because illegal syntax found: [" + illegal + "] " +
-            "Please set it as 'true' or 'false'")
+                       "Please set it as 'true' or 'false'")
           false
       }
       carbonLoadModel.setUseOnePass(useOnePass)
@@ -470,25 +468,36 @@ case class LoadTable(
           maxColumns)
         carbonLoadModel.setMaxColumns(validatedMaxColumns.toString)
         GlobalDictionaryUtil.updateTableMetadataFunc = updateTableMetadata
-
+        val storePath = relation.tableMeta.storePath
+        val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+        val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier
+          .getCarbonTableIdentifier
+        val carbonTablePath = CarbonStorePath
+          .getCarbonTablePath(storePath, carbonTableIdentifier)
+        val dictFolderPath = carbonTablePath.getMetadataDirectoryPath
+        val dimensions = carbonTable.getDimensionByTableName(
+          carbonTable.getFactTableName).asScala.toArray
         if (carbonLoadModel.getUseOnePass) {
           val colDictFilePath = carbonLoadModel.getColDictFilePath
-          if (colDictFilePath != null) {
-            val storePath = relation.tableMeta.storePath
-            val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
-            val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier
-              .getCarbonTableIdentifier
-            val carbonTablePath = CarbonStorePath
-              .getCarbonTablePath(storePath, carbonTableIdentifier)
-            val dictFolderPath = carbonTablePath.getMetadataDirectoryPath
-            val dimensions = carbonTable.getDimensionByTableName(
-              carbonTable.getFactTableName).asScala.toArray
+          if (!StringUtils.isEmpty(colDictFilePath)) {
             carbonLoadModel.initPredefDictMap()
             // generate predefined dictionary
             GlobalDictionaryUtil
               .generatePredefinedColDictionary(colDictFilePath, carbonTableIdentifier,
                 dimensions, carbonLoadModel, sqlContext, storePath, dictFolderPath)
           }
+          val allDictPath: String = carbonLoadModel.getAllDictPath
+          if(!StringUtils.isEmpty(allDictPath)) {
+            carbonLoadModel.initPredefDictMap()
+            GlobalDictionaryUtil
+              .generateDictionaryFromDictionaryFiles(sqlContext,
+                carbonLoadModel,
+                storePath,
+                carbonTableIdentifier,
+                dictFolderPath,
+                dimensions,
+                allDictionaryPath)
+          }
           // dictionaryServerClient dictionary generator
           val dictionaryServerPort = CarbonProperties.getInstance()
             .getProperty(CarbonCommonConstants.DICTIONARY_SERVER_PORT,

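Read together, the hunks above change how the one-pass flag is resolved: an explicit 'true' is honoured as given, and a load that supplies ALL_DICTIONARY_PATH now switches to the one-pass flow even when single_pass is left at 'false'. A consolidated sketch of the resulting logic (not a verbatim copy of either module):

    // sketch: effective resolution of the one-pass flag after this change
    val useOnePass = options.getOrElse("single_pass", "false").trim.toLowerCase match {
      case "true" => true
      case "false" => !StringUtils.isEmpty(allDictionaryPath) // ALL_DICTIONARY_PATH implies one pass
      case illegal =>
        LOGGER.error(s"Can't use single_pass, because illegal syntax found: [$illegal] " +
                     "Please set it as 'true' or 'false'")
        false
    }
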
http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 94a95fd..6bc9e61 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -41,22 +41,19 @@ import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.dictionary.server.DictionaryServer
 import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
 import org.apache.carbondata.core.metadata.{CarbonMetadata, CarbonTableIdentifier}
-import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
 import org.apache.carbondata.core.metadata.encoder.Encoding
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
-import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, CarbonDimension}
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension
 import org.apache.carbondata.core.mutate.{CarbonUpdateUtil, TupleIdEnum}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
 import org.apache.carbondata.core.util.path.CarbonStorePath
-import org.apache.carbondata.format.SchemaEvolutionEntry
 import org.apache.carbondata.processing.constants.TableOptionConstant
 import org.apache.carbondata.processing.etl.DataLoadingException
 import org.apache.carbondata.processing.model.{CarbonDataLoadSchema, CarbonLoadModel}
 import org.apache.carbondata.processing.newflow.constants.DataLoadProcessorConstants
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.rdd.{CarbonDataRDDFactory, DictionaryLoadModel}
-import org.apache.carbondata.spark.util.{CarbonScalaUtil, CarbonSparkUtil, CommonUtil,
-DataTypeConverterUtil, GlobalDictionaryUtil}
+import org.apache.carbondata.spark.util.{CarbonSparkUtil, CommonUtil, GlobalDictionaryUtil}
 
 object Checker {
   def validateTableExists(
@@ -436,15 +433,13 @@ case class LoadTable(
           DataLoadProcessorConstants.IS_EMPTY_DATA_BAD_RECORD + "," + isEmptyDataBadRecord)
       val useOnePass = options.getOrElse("single_pass", "false").trim.toLowerCase match {
         case "true" =>
-          if (StringUtils.isEmpty(allDictionaryPath)) {
+          true
+        case "false" =>
+          if (!StringUtils.isEmpty(allDictionaryPath)) {
             true
           } else {
-            LOGGER.error("Can't use single_pass, because SINGLE_PASS and ALL_DICTIONARY_PATH" +
-                         "can not be used together")
             false
           }
-        case "false" =>
-          false
         case illegal =>
           LOGGER.error(s"Can't use single_pass, because illegal syntax found: [" + illegal + "] " +
                        "Please set it as 'true' or 'false'")
@@ -480,23 +475,34 @@ case class LoadTable(
         carbonLoadModel.setMaxColumns(validatedMaxColumns.toString)
         GlobalDictionaryUtil.updateTableMetadataFunc = LoadTable.updateTableMetadata
         if (carbonLoadModel.getUseOnePass) {
+          val storePath = relation.tableMeta.storePath
+          val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+          val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier
+            .getCarbonTableIdentifier
+          val carbonTablePath = CarbonStorePath
+            .getCarbonTablePath(storePath, carbonTableIdentifier)
+          val dictFolderPath = carbonTablePath.getMetadataDirectoryPath
+          val dimensions = carbonTable.getDimensionByTableName(
+            carbonTable.getFactTableName).asScala.toArray
           val colDictFilePath = carbonLoadModel.getColDictFilePath
-          if (colDictFilePath != null) {
-            val storePath = relation.tableMeta.storePath
-            val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
-            val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier
-              .getCarbonTableIdentifier
-            val carbonTablePath = CarbonStorePath
-              .getCarbonTablePath(storePath, carbonTableIdentifier)
-            val dictFolderPath = carbonTablePath.getMetadataDirectoryPath
-            val dimensions = carbonTable.getDimensionByTableName(
-              carbonTable.getFactTableName).asScala.toArray
+          if (!StringUtils.isEmpty(colDictFilePath)) {
             carbonLoadModel.initPredefDictMap()
             // generate predefined dictionary
             GlobalDictionaryUtil
               .generatePredefinedColDictionary(colDictFilePath, carbonTableIdentifier,
                 dimensions, carbonLoadModel, sparkSession.sqlContext, storePath, dictFolderPath)
           }
+          if (!StringUtils.isEmpty(allDictionaryPath)) {
+            carbonLoadModel.initPredefDictMap()
+            GlobalDictionaryUtil
+              .generateDictionaryFromDictionaryFiles(sparkSession.sqlContext,
+                carbonLoadModel,
+                storePath,
+                carbonTableIdentifier,
+                dictFolderPath,
+                dimensions,
+                allDictionaryPath)
+          }
           // dictionaryServerClient dictionary generator
           val dictionaryServerPort = CarbonProperties.getInstance()
             .getProperty(CarbonCommonConstants.DICTIONARY_SERVER_PORT,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
new file mode 100644
index 0000000..23800ee
--- /dev/null
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.spark.util
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.{CarbonEnv, SparkSession}
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.processing.constants.TableOptionConstant
+import org.apache.carbondata.processing.model.{CarbonDataLoadSchema, CarbonLoadModel}
+
+/**
+  * Test Case for org.apache.carbondata.integration.spark.util.GlobalDictionaryUtil
+  */
+class AllDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+  var pwd: String = _
+  var sampleRelation: CarbonRelation = _
+  var complexRelation: CarbonRelation = _
+  var sampleAllDictionaryFile: String = _
+  var complexAllDictionaryFile: String = _
+
+  def buildCarbonLoadModel(relation: CarbonRelation,
+    filePath: String,
+    header: String,
+    allDictFilePath: String): CarbonLoadModel = {
+    val carbonLoadModel = new CarbonLoadModel
+    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
+    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
+    val table = relation.tableMeta.carbonTable
+    val carbonSchema = new CarbonDataLoadSchema(table)
+    carbonLoadModel.setDatabaseName(table.getDatabaseName)
+    carbonLoadModel.setTableName(table.getFactTableName)
+    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
+    carbonLoadModel.setFactFilePath(filePath)
+    carbonLoadModel.setCsvHeader(header)
+    carbonLoadModel.setCsvDelimiter(",")
+    carbonLoadModel.setComplexDelimiterLevel1("\\$")
+    carbonLoadModel.setComplexDelimiterLevel2("\\:")
+    carbonLoadModel.setAllDictPath(allDictFilePath)
+    carbonLoadModel.setSerializationNullFormat(
+          TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
+    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
+      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
+    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
+    carbonLoadModel
+  }
+
+  override def beforeAll {
+    sql("drop table if exists sample")
+    sql("drop table if exists complextypes")
+    buildTestData
+    // second time comment this line
+    buildTable
+    buildRelation
+  }
+
+  def buildTestData() = {
+    sampleAllDictionaryFile = s"${resourcesPath}/alldictionary/sample/20160423/1400_1405/*.dictionary"
+    complexAllDictionaryFile = s"${resourcesPath}/alldictionary/complex/20160423/1400_1405/*.dictionary"
+  }
+
+  def buildTable() = {
+    try {
+      sql(
+        "CREATE TABLE IF NOT EXISTS sample (id STRING, name STRING, city STRING, " +
+          "age INT) STORED BY 'org.apache.carbondata.format'"
+      )
+    } catch {
+      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+    }
+    try {
+      sql(
+        "create table complextypes (deviceInformationId string, channelsId string, " +
+          "ROMSize string, purchasedate string, mobile struct<imei: string, imsi: string>, MAC " +
+          "array<string>, locationinfo array<struct<ActiveAreaId: INT, ActiveCountry: string, " +
+          "ActiveProvince: string, Activecity: string, ActiveDistrict: string, ActiveStreet: " +
+          "string>>, proddate struct<productionDate: string,activeDeactivedate: array<string>>, " +
+          "gamePointId INT,contractNumber INT) STORED BY 'org.apache.carbondata.format'" +
+          "TBLPROPERTIES('DICTIONARY_EXCLUDE'='ROMSize')"
+      )
+    } catch {
+      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+    }
+  }
+
+  def buildRelation() = {
+    val warehouse = s"$resourcesPath/target/warehouse"
+    val storeLocation = s"$resourcesPath/target/store"
+    val metastoredb = s"$resourcesPath/target"
+    CarbonProperties.getInstance()
+      .addProperty("carbon.custom.distribution", "true")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,"FORCE")
+    import org.apache.spark.sql.CarbonSession._
+
+    val spark = SparkSession
+      .builder()
+      .master("local")
+      .appName("CarbonSessionExample")
+      .config("spark.sql.warehouse.dir", warehouse)
+      .config("spark.network.timeout", "600s")
+      .config("spark.executor.heartbeatInterval", "600s")
+      .config("carbon.enable.vector.reader","false")
+      .getOrCreateCarbonSession(storeLocation, metastoredb)
+    val catalog = CarbonEnv.getInstance(spark).carbonMetastore
+    sampleRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
+      "sample")(spark).asInstanceOf[CarbonRelation]
+    complexRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
+      "complextypes")(spark).asInstanceOf[CarbonRelation]
+  }
+
+  test("Support generate global dictionary from all dictionary files") {
+    val header = "id,name,city,age"
+    val carbonLoadModel = buildCarbonLoadModel(sampleRelation, null, header, sampleAllDictionaryFile)
+    GlobalDictionaryUtil
+      .generateGlobalDictionary(sqlContext,
+        carbonLoadModel,
+        sampleRelation.tableMeta.storePath)
+
+    DictionaryTestCaseUtil.
+      checkDictionary(sampleRelation, "city", "shenzhen")
+  }
+
+  test("Support generate global dictionary from all dictionary files for complex type") {
+    val header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber"
+    val carbonLoadModel = buildCarbonLoadModel(complexRelation, null, header, complexAllDictionaryFile)
+    GlobalDictionaryUtil
+      .generateGlobalDictionary(sqlContext,
+      carbonLoadModel,
+      complexRelation.tableMeta.storePath)
+
+    DictionaryTestCaseUtil.
+      checkDictionary(complexRelation, "channelsId", "1650")
+  }
+  
+  override def afterAll {
+    sql("drop table sample")
+    sql("drop table complextypes")
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
new file mode 100644
index 0000000..62b0aff
--- /dev/null
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.util
+
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.test.TestQueryExecutor
+
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.CarbonTableIdentifier
+import org.apache.carbondata.spark.load.CarbonLoaderUtil
+
+/**
+ * Utility for global dictionary test cases
+ */
+object DictionaryTestCaseUtil {
+
+  /**
+   *  check whether the dictionary of specified column generated
+   * @param relation  carbon table relation
+   * @param columnName  name of specified column
+   * @param value  a value of column
+   */
+  def checkDictionary(relation: CarbonRelation, columnName: String, value: String) {
+    val table = relation.tableMeta.carbonTable
+    val dimension = table.getDimensionByName(table.getFactTableName, columnName)
+    val tableIdentifier = new CarbonTableIdentifier(table.getDatabaseName, table.getFactTableName, "uniqueid")
+    val columnIdentifier = new DictionaryColumnUniqueIdentifier(tableIdentifier,
+      dimension.getColumnIdentifier, dimension.getDataType
+    )
+    val dict = CarbonLoaderUtil.getDictionary(columnIdentifier, TestQueryExecutor.storeLocation)
+    assert(dict.getSurrogateKey(value) != CarbonCommonConstants.INVALID_SURROGATE_KEY)
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd50ad26/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
new file mode 100644
index 0000000..10f99b7
--- /dev/null
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
@@ -0,0 +1,284 @@
+/*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+package org.apache.carbondata.spark.util
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.{CarbonEnv, SparkSession}
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.processing.constants.TableOptionConstant
+import org.apache.carbondata.processing.etl.DataLoadingException
+import org.apache.carbondata.processing.model.{CarbonDataLoadSchema, CarbonLoadModel}
+import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
+
+/**
+ * test case for external column dictionary generation
+ * also support complicated type
+ */
+class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+  var extComplexRelation: CarbonRelation = _
+  var verticalDelimiteRelation: CarbonRelation = _
+  var loadSqlRelation: CarbonRelation = _
+  var filePath: String = _
+  var pwd: String = _
+  var complexFilePath1: String = _
+  var complexFilePath2: String = _
+  var extColDictFilePath1: String = _
+  var extColDictFilePath2: String = _
+  var extColDictFilePath3: String = _
+  var header: String = _
+  var header2: String = _
+
+  def buildTestData() = {
+
+    filePath = s"${ resourcesPath }/sample.csv"
+    complexFilePath1 = s"${ resourcesPath }/complexdata2.csv"
+    complexFilePath2 = s"${ resourcesPath }/verticalDelimitedData.csv"
+    extColDictFilePath1 = s"deviceInformationId:${ resourcesPath }/deviceInformationId.csv," +
+                          s"mobile.imei:${ resourcesPath }/mobileimei.csv," +
+                          s"mac:${ resourcesPath }/mac.csv," +
+                          s"locationInfo.ActiveCountry:${ resourcesPath
+                          }/locationInfoActiveCountry.csv"
+    extColDictFilePath2 = s"deviceInformationId:${ resourcesPath }/deviceInformationId2.csv"
+    extColDictFilePath3 = s"channelsId:${ resourcesPath }/channelsId.csv"
+    header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC," +
+             "locationinfo,proddate,gamePointId,contractNumber"
+    header2 = "deviceInformationId,channelsId,contractNumber"
+  }
+
+  def buildTable() = {
+    try {
+      sql(
+        """CREATE TABLE extComplextypes (deviceInformationId int,
+     channelsId string, ROMSize string, purchasedate string,
+     mobile struct<imei:string, imsi:string>, MAC array<string>,
+     locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string,
+     ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>,
+     proddate struct<productionDate:string,activeDeactivedate:array<string>>,
+     gamePointId double,contractNumber double)
+     STORED BY 'org.apache.carbondata.format'
+     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
+        """)
+    } catch {
+      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+    }
+
+    try {
+      sql(
+        """CREATE TABLE verticalDelimitedTable (deviceInformationId int,
+     channelsId string,contractNumber double)
+     STORED BY 'org.apache.carbondata.format'
+     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
+        """)
+    } catch {
+      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+    }
+
+    try {
+      sql(
+        """CREATE TABLE loadSqlTest (deviceInformationId int,
+     channelsId string, ROMSize string, purchasedate string,
+     mobile struct<imei:string, imsi:string>, MAC array<string>,
+     locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string,
+     ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>,
+     proddate struct<productionDate:string,activeDeactivedate:array<string>>,
+     gamePointId double,contractNumber double)
+     STORED BY 'org.apache.carbondata.format'
+     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
+        """)
+    } catch {
+      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+    }
+  }
+
+  def buildRelation() = {
+    val warehouse = s"$resourcesPath/target/warehouse"
+    val storeLocation = s"$resourcesPath/target/store"
+    val metastoredb = s"$resourcesPath/target"
+    CarbonProperties.getInstance()
+      .addProperty("carbon.custom.distribution", "true")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,"FORCE")
+    import org.apache.spark.sql.CarbonSession._
+
+    val spark = SparkSession
+      .builder()
+      .master("local")
+      .appName("CarbonSessionExample")
+      .config("spark.sql.warehouse.dir", warehouse)
+      .config("spark.network.timeout", "600s")
+      .config("spark.executor.heartbeatInterval", "600s")
+      .config("carbon.enable.vector.reader","false")
+      .getOrCreateCarbonSession(storeLocation, metastoredb)
+    val catalog = CarbonEnv.getInstance(spark).carbonMetastore
+    extComplexRelation = catalog
+      .lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
+        "extComplextypes")(spark)
+      .asInstanceOf[CarbonRelation]
+    verticalDelimiteRelation = catalog
+      .lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
+        "verticalDelimitedTable")(spark)
+      .asInstanceOf[CarbonRelation]
+    loadSqlRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
+      "loadSqlTest")(spark)
+      .asInstanceOf[CarbonRelation]
+  }
+
+  def buildCarbonLoadModel(relation: CarbonRelation,
+      filePath: String,
+      header: String,
+      extColFilePath: String,
+      csvDelimiter: String = ","): CarbonLoadModel = {
+    val carbonLoadModel = new CarbonLoadModel
+    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
+    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
+    val table = relation.tableMeta.carbonTable
+    val carbonSchema = new CarbonDataLoadSchema(table)
+    carbonLoadModel.setDatabaseName(table.getDatabaseName)
+    carbonLoadModel.setTableName(table.getFactTableName)
+    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
+    carbonLoadModel.setFactFilePath(filePath)
+    carbonLoadModel.setCsvHeader(header)
+    carbonLoadModel.setCsvDelimiter(csvDelimiter)
+    carbonLoadModel.setComplexDelimiterLevel1("\\$")
+    carbonLoadModel.setComplexDelimiterLevel2("\\:")
+    carbonLoadModel.setColDictFilePath(extColFilePath)
+    carbonLoadModel.setQuoteChar("\"");
+    carbonLoadModel.setSerializationNullFormat(
+      TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
+    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
+      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
+    carbonLoadModel.setDefaultDateFormat(CarbonProperties.getInstance().getProperty(
+      CarbonCommonConstants.CARBON_DATE_FORMAT,
+      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT))
+    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
+    carbonLoadModel.setMaxColumns("100")
+    carbonLoadModel
+  }
+
+  override def beforeAll {
+    buildTestData
+    buildTable
+    buildRelation
+  }
+
+  test("Generate global dictionary from external column file") {
+    // load the first time
+    var carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
+      header, extColDictFilePath1)
+    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
+      extComplexRelation.tableMeta.storePath)
+    // check whether the dictionary is generated
+    DictionaryTestCaseUtil.checkDictionary(
+      extComplexRelation, "deviceInformationId", "10086")
+
+    // load the second time
+    carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
+      header, extColDictFilePath2)
+    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
+      extComplexRelation.tableMeta.storePath)
+    // check the old dictionary and whether the new distinct value is generated
+    DictionaryTestCaseUtil.checkDictionary(
+      extComplexRelation, "deviceInformationId", "10086")
+    DictionaryTestCaseUtil.checkDictionary(
+      extComplexRelation, "deviceInformationId", "10011")
+  }
+
+  test("When csv delimiter is not comma") {
+    //  when csv delimiter is comma
+    var carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
+      header, extColDictFilePath3)
+    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
+      extComplexRelation.tableMeta.storePath)
+    // check whether the dictionary is generated
+    DictionaryTestCaseUtil.checkDictionary(
+      extComplexRelation, "channelsId", "1421|")
+
+    //  when csv delimiter is not comma
+    carbonLoadModel = buildCarbonLoadModel(verticalDelimiteRelation, complexFilePath2,
+      header2, extColDictFilePath3, "|")
+    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
+      verticalDelimiteRelation.tableMeta.storePath)
+    // check whether the dictionary is generated
+    DictionaryTestCaseUtil.checkDictionary(
+      verticalDelimiteRelation, "channelsId", "1431,")
+  }
+
+  test("LOAD DML with COLUMNDICT option") {
+    try {
+      sql(
+        s"""
+      LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
+      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1')
+        """)
+    } catch {
+      case ex: Exception =>
+        LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
+        assert(false)
+    }
+    DictionaryTestCaseUtil.checkDictionary(
+      loadSqlRelation, "deviceInformationId", "10086")
+  }
+
+  test("COLUMNDICT and ALL_DICTIONARY_PATH can not be used together") {
+    try {
+      sql(
+        s"""
+        LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
+        OPTIONS('COLUMNDICT'='$extColDictFilePath1',"ALL_DICTIONARY_PATH"='$extColDictFilePath1')
+        """)
+      assert(false)
+    } catch {
+      case ex: MalformedCarbonCommandException =>
+        assertResult(ex.getMessage)(
+          "Error: COLUMNDICT and ALL_DICTIONARY_PATH can not be used together " +
+          "in options")
+      case _: Throwable => assert(false)
+    }
+  }
+
+  test("Measure can not use COLUMNDICT") {
+    try {
+      sql(
+        s"""
+      LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
+      OPTIONS('FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
+      """)
+      assert(false)
+    } catch {
+      case ex: DataLoadingException =>
+        assertResult(ex.getMessage)(
+          "Column gamePointId is not a key column. Only key column can be part " +
+          "of dictionary and used in COLUMNDICT option.")
+      case _: Throwable => assert(false)
+    }
+  }
+
+  override def afterAll: Unit = {
+    sql("DROP TABLE extComplextypes")
+    sql("DROP TABLE verticalDelimitedTable")
+    sql("DROP TABLE loadSqlTest")
+  }
+}


[04/42] carbondata git commit: added check for starting dictionary server

Posted by ra...@apache.org.
added check for starting dictionary server

moved single pass test suite to common module


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6f554504
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6f554504
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6f554504

Branch: refs/heads/branch-1.1
Commit: 6f55450482601488a4762a01cb4316f51d0f9025
Parents: 38a5144
Author: kunal642 <ku...@knoldus.in>
Authored: Wed May 17 13:07:12 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:44:11 2017 +0530

----------------------------------------------------------------------
 .../generator/ServerDictionaryGenerator.java    |   4 +-
 .../dataload/TestLoadDataWithSinglePass.scala   | 129 +++++++++++++++++++
 .../execution/command/carbonTableSchema.scala   |  21 ++-
 .../dataload/TestLoadDataWithSinglePass.scala   | 111 ----------------
 .../spark/rdd/CarbonDataRDDFactory.scala        |   4 +-
 .../execution/command/carbonTableSchema.scala   |  21 ++-
 6 files changed, 166 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
index cd168b8..456e885 100644
--- a/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/dictionary/generator/ServerDictionaryGenerator.java
@@ -73,7 +73,9 @@ public class ServerDictionaryGenerator implements DictionaryGenerator<Integer, D
 
   public void writeTableDictionaryData(String tableUniqueName) throws Exception {
     TableDictionaryGenerator generator = tableMap.get(tableUniqueName);
-    generator.writeDictionaryData(tableUniqueName);
+    if (generator != null) {
+      generator.writeDictionaryData(tableUniqueName);
+    }
   }
 
 }
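This null guard pairs with the conditional server start-up later in this change set: when a table has no dictionary-encoded dimensions, no TableDictionaryGenerator is ever registered for it, so writing table dictionary data must tolerate a missing entry instead of failing. A rough caller-side sketch, reusing names from this patch (serverOpt stands in for the Option[DictionaryServer] that LoadTable now passes down):

    // sketch: write dictionary data only when a server was actually started
    val uniqueTableName = s"${ carbonLoadModel.getDatabaseName }_${ carbonLoadModel.getTableName }"
    serverOpt match {
      case Some(server) => server.writeTableDictionary(uniqueTableName) // now a no-op if the table never registered a generator
      case None => // no dictionary columns, no server, nothing to write
    }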

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
new file mode 100644
index 0000000..3bb16f2
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.dataload
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test Class for data loading use one pass
+  *
+  */
+class TestLoadDataWithSinglePass extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS table_two_pass")
+    sql("DROP TABLE IF EXISTS table_one_pass")
+    sql("DROP TABLE IF EXISTS table_one_pass_2")
+
+    sql(
+      """
+        |CREATE TABLE table_two_pass (ID int, date Timestamp, country String,
+        |name String, phonetype String, serialname String, salary int)
+        |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(
+      s"""
+        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_two_pass
+        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='false')
+      """.stripMargin)
+
+    sql(
+      """
+        |CREATE TABLE table_one_pass (ID int, date Timestamp, country String,
+        |name String, phonetype String, serialname String, salary int)
+        |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+
+    sql(
+      s"""
+        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_one_pass
+        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true')
+      """.stripMargin)
+  }
+
+  test("test data loading use one pass") {
+    checkAnswer(
+      sql("select * from table_one_pass"),
+      sql("select * from table_two_pass")
+    )
+  }
+
+  test("test data loading use one pass when offer column dictionary file") {
+    sql(
+      """
+        |CREATE TABLE table_one_pass_2 (ID int, date Timestamp, country String,
+        |name String, phonetype String, serialname String, salary int)
+        |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(
+      s"""
+        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_one_pass_2
+        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true', 'COLUMNDICT'=
+        |'country:$resourcesPath/columndictionary/country.csv, name:$resourcesPath/columndictionary/name.csv')
+      """.stripMargin)
+
+    checkAnswer(
+      sql("select * from table_one_pass_2"),
+      sql("select * from table_two_pass")
+    )
+  }
+
+  test("test data loading use one pass when do incremental load") {
+    sql(
+      s"""
+        |LOAD DATA local inpath '$resourcesPath/dataIncrement.csv' INTO TABLE table_two_pass
+        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='false')
+      """.stripMargin)
+    sql(
+      s"""
+        |LOAD DATA local inpath '$resourcesPath/dataIncrement.csv' INTO TABLE table_one_pass
+        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true')
+      """.stripMargin)
+
+    checkAnswer(
+      sql("select * from table_one_pass"),
+      sql("select * from table_two_pass")
+    )
+  }
+
+  test("test data loading with dctionary exclude") {
+    sql("DROP TABLE IF EXISTS dict_exclude")
+    sql(
+      """
+        |CREATE TABLE dict_exclude (ID int, date Timestamp, country String,
+        |name String, phonetype String, serialname String, salary int)
+        |STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='country,name,serialname,phonetype')
+      """.stripMargin)
+    sql(
+      s"""
+         |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE dict_exclude
+         |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='FALSE')
+      """.stripMargin)
+    checkAnswer(sql("select name from dict_exclude limit 1"),Row("aaa1"))
+  }
+
+  override def afterAll {
+    sql("DROP TABLE IF EXISTS table_two_pass")
+    sql("DROP TABLE IF EXISTS table_one_pass")
+    sql("DROP TABLE IF EXISTS table_one_pass_2")
+    sql("DROP TABLE IF EXISTS dict_exclude")
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index c770e1b..9745ddd 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -495,16 +495,27 @@ case class LoadTable(
               CarbonCommonConstants.DICTIONARY_SERVER_PORT_DEFAULT)
           val sparkDriverHost = sqlContext.sparkContext.getConf.get("spark.driver.host")
           carbonLoadModel.setDictionaryServerHost(sparkDriverHost)
-          // start dictionary server when use one pass load.
-          val server: DictionaryServer = DictionaryServer
-            .getInstance(dictionaryServerPort.toInt)
-          carbonLoadModel.setDictionaryServerPort(server.getPort)
+          // start dictionary server when use one pass load and dimension with DICTIONARY
+          // encoding is present.
+          val allDimensions = table.getAllDimensions.asScala.toList
+          val createDictionary = allDimensions.exists {
+            carbonDimension => carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
+              !carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)
+          }
+          val server: Option[DictionaryServer] = if (createDictionary) {
+            val dictionaryServer = DictionaryServer
+              .getInstance(dictionaryServerPort.toInt)
+            carbonLoadModel.setDictionaryServerPort(dictionaryServer.getPort)
+            Some(dictionaryServer)
+          } else {
+            None
+          }
           CarbonDataRDDFactory.loadCarbonData(sqlContext,
             carbonLoadModel,
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            Some(server),
+            server,
             dataFrame,
             updateModel)
         } else {

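With this change a one-pass load only starts a dictionary server when the table has at least one dimension that is DICTIONARY encoded but not direct-dictionary; a table such as dict_exclude in the new test above, where every string column is DICTIONARY_EXCLUDE, would not need a server at all. The port is still read from the carbon property referenced in the surrounding hunk; a hedged example of overriding it (the port value here is arbitrary, not necessarily the shipped default):

    // illustrative only: choose a custom dictionary server port before loading
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.DICTIONARY_SERVER_PORT, "2030")
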
http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
deleted file mode 100644
index 1d456d3..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataWithSinglePass.scala
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.dataload
-
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-/**
-  * Test Class for data loading use one pass
-  *
-  */
-class TestLoadDataWithSinglePass extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    sql("DROP TABLE IF EXISTS table_two_pass")
-    sql("DROP TABLE IF EXISTS table_one_pass")
-    sql("DROP TABLE IF EXISTS table_one_pass_2")
-
-    sql(
-      """
-        |CREATE TABLE table_two_pass (ID int, date Timestamp, country String,
-        |name String, phonetype String, serialname String, salary int)
-        |STORED BY 'org.apache.carbondata.format'
-      """.stripMargin)
-
-    sql(
-      s"""
-        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_two_pass
-        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='false')
-      """.stripMargin)
-
-    sql(
-      """
-        |CREATE TABLE table_one_pass (ID int, date Timestamp, country String,
-        |name String, phonetype String, serialname String, salary int)
-        |STORED BY 'org.apache.carbondata.format'
-      """.stripMargin)
-
-    sql(
-      s"""
-        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_one_pass
-        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true')
-      """.stripMargin)
-  }
-
-  test("test data loading use one pass") {
-    checkAnswer(
-      sql("select * from table_one_pass"),
-      sql("select * from table_two_pass")
-    )
-  }
-
-  test("test data loading use one pass when offer column dictionary file") {
-    sql(
-      """
-        |CREATE TABLE table_one_pass_2 (ID int, date Timestamp, country String,
-        |name String, phonetype String, serialname String, salary int)
-        |STORED BY 'org.apache.carbondata.format'
-      """.stripMargin)
-    sql(
-      s"""
-        |LOAD DATA local inpath '$resourcesPath/source.csv' INTO TABLE table_one_pass_2
-        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true', 'COLUMNDICT'=
-        |'country:$resourcesPath/columndictionary/country.csv, name:$resourcesPath/columndictionary/name.csv')
-      """.stripMargin)
-
-    checkAnswer(
-      sql("select * from table_one_pass_2"),
-      sql("select * from table_two_pass")
-    )
-  }
-
-  test("test data loading use one pass when do incremental load") {
-    sql(
-      s"""
-        |LOAD DATA local inpath '$resourcesPath/dataIncrement.csv' INTO TABLE table_two_pass
-        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='false')
-      """.stripMargin)
-    sql(
-      s"""
-        |LOAD DATA local inpath '$resourcesPath/dataIncrement.csv' INTO TABLE table_one_pass
-        |OPTIONS('DELIMITER'= ',', 'SINGLE_PASS'='true')
-      """.stripMargin)
-
-    checkAnswer(
-      sql("select * from table_one_pass"),
-      sql("select * from table_two_pass")
-    )
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS table_two_pass")
-    sql("DROP TABLE IF EXISTS table_one_pass")
-    sql("DROP TABLE IF EXISTS table_one_pass_2")
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 835af35..ede63ec 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -861,7 +861,7 @@ object CarbonDataRDDFactory {
 
   private def writeDictionary(carbonLoadModel: CarbonLoadModel,
       result: Option[DictionaryServer], writeAll: Boolean) = {
-    // write dictionary file and shutdown dictionary server
+    // write dictionary file
     val uniqueTableName: String = s"${ carbonLoadModel.getDatabaseName }_${
       carbonLoadModel.getTableName
     }"
@@ -874,7 +874,7 @@ object CarbonDataRDDFactory {
             server.writeTableDictionary(uniqueTableName)
           }
         } catch {
-          case ex: Exception =>
+          case _: Exception =>
             LOGGER.error(s"Error while writing dictionary file for $uniqueTableName")
             throw new Exception("Dataload failed due to error while writing dictionary file!")
         }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6f554504/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 11b3115..94a95fd 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -504,16 +504,27 @@ case class LoadTable(
           val sparkDriverHost = sparkSession.sqlContext.sparkContext.
             getConf.get("spark.driver.host")
           carbonLoadModel.setDictionaryServerHost(sparkDriverHost)
-          // start dictionary server when use one pass load.
-          val server: DictionaryServer = DictionaryServer
-            .getInstance(dictionaryServerPort.toInt)
-          carbonLoadModel.setDictionaryServerPort(server.getPort)
+          // start dictionary server when use one pass load and dimension with DICTIONARY
+          // encoding is present.
+          val allDimensions = table.getAllDimensions.asScala.toList
+          val createDictionary = allDimensions.exists {
+            carbonDimension => carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
+                               !carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)
+          }
+          val server: Option[DictionaryServer] = if (createDictionary) {
+            val dictionaryServer = DictionaryServer
+              .getInstance(dictionaryServerPort.toInt)
+            carbonLoadModel.setDictionaryServerPort(dictionaryServer.getPort)
+            Some(dictionaryServer)
+          } else {
+            None
+          }
           CarbonDataRDDFactory.loadCarbonData(sparkSession.sqlContext,
             carbonLoadModel,
             relation.tableMeta.storePath,
             columnar,
             partitionStatus,
-            Some(server),
+            server,
             dataFrame,
             updateModel)
         }


[27/42] carbondata git commit: Fix issue of CarbonData-1134

Posted by ra...@apache.org.
Fix issue of CarbonData-1134


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0a0b7b1a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0a0b7b1a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0a0b7b1a

Branch: refs/heads/branch-1.1
Commit: 0a0b7b1a185e2e0175e3e14ab48be9df86c10952
Parents: 2403f28
Author: chenerlu <ch...@huawei.com>
Authored: Tue Jun 6 18:57:13 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:17:08 2017 +0530

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala    | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0a0b7b1a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
index a5fef5e..a01ccb2 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
@@ -53,6 +53,7 @@ object TestQueryExecutor {
   val INSTANCE = lookupQueryExecutor.newInstance().asInstanceOf[TestQueryExecutorRegister]
   CarbonProperties.getInstance()
     .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
+    .addProperty(CarbonCommonConstants.STORE_LOCATION, storeLocation)
   private def lookupQueryExecutor: Class[_] = {
     ServiceLoader.load(classOf[TestQueryExecutorRegister], Utils.getContextOrSparkClassLoader)
       .iterator().next().getClass


[20/42] carbondata git commit: IUD support in 2.1

Posted by ra...@apache.org.
IUD support in 2.1


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b2026970
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b2026970
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b2026970

Branch: refs/heads/branch-1.1
Commit: b202697019dcf18ba8cd41ef16c49b407269d5c1
Parents: 43e06b6
Author: ravikiran23 <ra...@gmail.com>
Authored: Fri May 26 11:44:53 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:58:34 2017 +0530

----------------------------------------------------------------------
 .../iud/DeleteCarbonTableTestCase.scala         | 131 +++
 .../iud/UpdateCarbonTableTestCase.scala         | 393 +++++++++
 .../spark/sql/CarbonCatalystOperators.scala     |  24 +
 .../sql/execution/command/IUDCommands.scala     | 857 +++++++++++++++++++
 .../spark/sql/hive/CarbonAnalysisRules.scala    | 138 +++
 .../spark/sql/hive/CarbonSessionState.scala     |   2 +
 .../sql/optimizer/CarbonLateDecodeRule.scala    |  24 +-
 .../sql/parser/CarbonSpark2SqlParser.scala      | 130 ++-
 8 files changed, 1694 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
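The new test suites cover the DML forms this commit enables. For quick reference, the delete form below matches the test file that follows, and the update form shows the CarbonData set-clause style; table and column names are illustrative:

    // illustrative IUD statements against the iud_db.dest table used in the tests
    sql("""update iud_db.dest d set (d.c3, d.c5) = ('new_c3', 'new_c5') where d.c1 = 'a'""").show()
    sql("""delete from iud_db.dest d where d.c1 = 'a'""").show()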


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
new file mode 100644
index 0000000..33ae0d3
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.spark.testsuite.iud
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+
+    sql("use default")
+    sql("drop database  if exists iud_db cascade")
+    sql("create database  iud_db")
+
+    sql("""create table iud_db.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/source2.csv' INTO table iud_db.source2""")
+    sql("use iud_db")
+  }
+  test("delete data from carbon table with alias [where clause ]") {
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+//    sql(s"""select getTupleId() as tupleId from dest """).show
+    sql("""delete from iud_db.dest d where d.c1 = 'a'""").show
+    checkAnswer(
+      sql("""select c2 from iud_db.dest"""),
+      Seq(Row(2), Row(3),Row(4), Row(5))
+    )
+  }
+  test("delete data from  carbon table[where clause ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from iud_db.dest e where e.c2 = 2""").show
+    checkAnswer(
+      sql("""select c1 from dest"""),
+      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
+    )
+  }
+  test("delete data from  carbon table[where IN  ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from iud_db.dest where c1 IN ('d', 'e')""").show
+    checkAnswer(
+      sql("""select c1 from dest"""),
+      Seq(Row("a"), Row("b"),Row("c"))
+    )
+  }
+
+  test("delete data from  carbon table[with alias No where clause]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from iud_db.dest a""").show
+    checkAnswer(
+      sql("""select c1 from iud_db.dest"""),
+      Seq()
+    )
+  }
+  test("delete data from  carbon table[No alias No where clause]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from dest""").show()
+    checkAnswer(
+      sql("""select c1 from dest"""),
+      Seq()
+    )
+  }
+
+  test("delete data from  carbon table[ JOIN with another table ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql(""" DELETE FROM dest t1 INNER JOIN source2 t2 ON t1.c1 = t2.c11""").show(truncate = false)
+    checkAnswer(
+      sql("""select c1 from iud_db.dest"""),
+      Seq(Row("c"), Row("d"), Row("e"))
+    )
+  }
+
+  test("delete data from  carbon table[where IN (sub query) ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
+    sql("""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2)""").show(truncate = false)
+    checkAnswer(
+      sql("""select c1 from iud_db.dest"""),
+      Seq(Row("c"), Row("d"), Row("e"))
+    )
+  }
+  test("delete data from  carbon table[where IN (sub query with where clause) ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""").show()
+    sql("""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from  iud_db.dest where c1 IN (select c11 from source2 where c11 = 'b')""").show()
+    checkAnswer(
+      sql("""select c1 from iud_db.dest"""),
+      Seq(Row("a"), Row("c"), Row("d"), Row("e"))
+    )
+  }
+  test("delete data from  carbon table[where numeric condition  ]") {
+    sql("""drop table if exists iud_db.dest""")
+    sql("""create table iud_db.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH 'D:/apacheCarbon/carbondata/integration/spark-common-test/src/test/resources/IUD/dest.csv' INTO table iud_db.dest""")
+    sql("""delete from iud_db.dest where c2 >= 4""").show()
+    checkAnswer(
+      sql("""select count(*) from iud_db.dest"""),
+      Seq(Row(3))
+    )
+  }
+  override def afterAll {
+  //  sql("use default")
+  //  sql("drop database  if exists iud_db cascade")
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
new file mode 100644
index 0000000..0ad700b
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.spark.testsuite.iud
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll {
+
+    sql("drop database if exists iud cascade")
+    sql("create database iud")
+    sql("use iud")
+    sql("""create table iud.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest""")
+    sql("""create table iud.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv' INTO table iud.source2""")
+    sql("""create table iud.other (c1 string,c2 int) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/other.csv' INTO table iud.other""")
+    sql("""create table iud.hdest (c1 string,c2 int,c3 string,c5 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' STORED AS TEXTFILE""").show()
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.hdest""")
+    sql("""CREATE TABLE iud.update_01(imei string,age int,task bigint,num double,level decimal(10,3),name string)STORED BY 'org.apache.carbondata.format' """)
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+  }
+
+
+//  test("test update operation with 0 rows updation.") {
+//    sql("""drop table if exists iud.zerorows""").show
+//    sql("""create table iud.zerorows (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.zerorows""")
+//    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
+//    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'xxx'""").show()
+//     checkAnswer(
+//      sql("""select c1,c2,c3,c5 from iud.zerorows"""),
+//      Seq(Row("a",2,"aa","aaa"),Row("b",2,"bb","bbb"),Row("c",3,"cc","ccc"),Row("d",4,"dd","ddd"),Row("e",5,"ee","eee"))
+//    )
+//    sql("""drop table iud.zerorows""").show
+//
+//
+//  }
+
+
+  test("update carbon table[select from source table with where and exist]") {
+      sql("""drop table if exists iud.dest11""").show
+      sql("""create table iud.dest11 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest11""")
+      sql("""update iud.dest11 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+      checkAnswer(
+        sql("""select c3,c5 from iud.dest11"""),
+        Seq(Row("cc","ccc"), Row("dd","ddd"),Row("ee","eee"), Row("MGM","Disco"),Row("RGK","Music"))
+      )
+      sql("""drop table iud.dest11""").show
+   }
+
+//   test("update carbon table[using destination table columns with where and exist]") {
+//    sql("""drop table if exists iud.dest22""")
+//    sql("""create table iud.dest22 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest22""")
+//    checkAnswer(
+//      sql("""select c2 from iud.dest22 where c1='a'"""),
+//      Seq(Row(1))
+//    )
+//    sql("""update dest22 d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
+//    checkAnswer(
+//      sql("""select c2 from iud.dest22 where c1='a'"""),
+//      Seq(Row(2))
+//    )
+//    sql("""drop table iud.dest22""")
+//   }
+
+//   test("update carbon table without alias in set columns") {
+//      sql("""drop table iud.dest33""")
+//      sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+//      sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+//      checkAnswer(
+//        sql("""select c3,c5 from iud.dest33 where c1='a'"""),
+//        Seq(Row("MGM","Disco"))
+//      )
+//      sql("""drop table iud.dest33""")
+//  }
+//
+//  test("update carbon table without alias in set columns with mulitple loads") {
+//    sql("""drop table iud.dest33""")
+//    sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
+//    sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+//    checkAnswer(
+//      sql("""select c3,c5 from iud.dest33 where c1='a'"""),
+//      Seq(Row("MGM","Disco"),Row("MGM","Disco"))
+//    )
+//    sql("""drop table iud.dest33""")
+//  }
+//
+//   test("update carbon table without alias in set three columns") {
+//     sql("""drop table iud.dest44""")
+//     sql("""create table iud.dest44 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest44""")
+//     sql("""update iud.dest44 d set (c1,c3,c5 ) = (select s.c11, s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+//     checkAnswer(
+//       sql("""select c1,c3,c5 from iud.dest44 where c1='a'"""),
+//       Seq(Row("a","MGM","Disco"))
+//     )
+//     sql("""drop table iud.dest44""")
+//   }
+//
+//   test("update carbon table[single column select from source with where and exist]") {
+//      sql("""drop table iud.dest55""")
+//      sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
+//     sql("""update iud.dest55 d set (c3)  = (select s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+//      checkAnswer(
+//        sql("""select c1,c3 from iud.dest55 """),
+//        Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
+//      )
+//      sql("""drop table iud.dest55""")
+//   }
+//
+//  test("update carbon table[single column SELECT from source with where and exist]") {
+//    sql("""drop table iud.dest55""")
+//    sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
+//    sql("""update iud.dest55 d set (c3)  = (SELECT s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
+//    checkAnswer(
+//      sql("""select c1,c3 from iud.dest55 """),
+//      Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
+//    )
+//    sql("""drop table iud.dest55""")
+//  }
+//
+//   test("update carbon table[using destination table columns without where clause]") {
+//     sql("""drop table iud.dest66""")
+//     sql("""create table iud.dest66 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest66""")
+//     sql("""update iud.dest66 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest66 """),
+//       Seq(Row(2,"aaaz"),Row(3,"bbbz"),Row(4,"cccz"),Row(5,"dddz"),Row(6,"eeez"))
+//     )
+//     sql("""drop table iud.dest66""")
+//   }
+//
+//   test("update carbon table[using destination table columns with where clause]") {
+//       sql("""drop table iud.dest77""")
+//       sql("""create table iud.dest77 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//       sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest77""")
+//       sql("""update iud.dest77 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z")) where d.c3 = 'dd'""").show()
+//       checkAnswer(
+//         sql("""select c2,c5 from iud.dest77 where c3 = 'dd'"""),
+//         Seq(Row(5,"dddz"))
+//       )
+//       sql("""drop table iud.dest77""")
+//   }
+//
+//   test("update carbon table[using destination table( no alias) columns without where clause]") {
+//     sql("""drop table iud.dest88""")
+//     sql("""create table iud.dest88 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest88""")
+//     sql("""update iud.dest88  set (c2, c5 ) = (c2 + 1, concat(c5 , "y" ))""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest88 """),
+//       Seq(Row(2,"aaay"),Row(3,"bbby"),Row(4,"cccy"),Row(5,"dddy"),Row(6,"eeey"))
+//     )
+//     sql("""drop table iud.dest88""")
+//   }
+//
+//   test("update carbon table[using destination table columns with hard coded value ]") {
+//     sql("""drop table iud.dest99""")
+//     sql("""create table iud.dest99 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest99""")
+//     sql("""update iud.dest99 d set (c2, c5 ) = (c2 + 1, "xyx")""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest99 """),
+//       Seq(Row(2,"xyx"),Row(3,"xyx"),Row(4,"xyx"),Row(5,"xyx"),Row(6,"xyx"))
+//     )
+//     sql("""drop table iud.dest99""")
+//   }
+//
+//   test("update carbon tableusing destination table columns with hard coded value and where condition]") {
+//     sql("""drop table iud.dest110""")
+//     sql("""create table iud.dest110 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest110""")
+//     sql("""update iud.dest110 d set (c2, c5 ) = (c2 + 1, "xyx") where d.c1 = 'e'""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest110 where c1 = 'e' """),
+//       Seq(Row(6,"xyx"))
+//     )
+//     sql("""drop table iud.dest110""")
+//   }
+//
+//   test("update carbon table[using source  table columns with where and exist and no destination table condition]") {
+//     sql("""drop table iud.dest120""")
+//     sql("""create table iud.dest120 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest120""")
+//     sql("""update iud.dest120 d  set (c3, c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11)""").show()
+//     checkAnswer(
+//       sql("""select c3,c5 from iud.dest120 """),
+//       Seq(Row("MGM","Disco"),Row("RGK","Music"),Row("cc","ccc"),Row("dd","ddd"),Row("ee","eee"))
+//     )
+//     sql("""drop table iud.dest120""")
+//   }
+//
+//   test("update carbon table[using destination table where and exist]") {
+//     sql("""drop table iud.dest130""")
+//     sql("""create table iud.dest130 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest130""")
+//     sql("""update iud.dest130 dd  set (c2, c5 ) = (c2 + 1, "xyx")  where dd.c1 = 'a'""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest130 where c1 = 'a' """),
+//       Seq(Row(2,"xyx"))
+//     )
+//     sql("""drop table iud.dest130""")
+//   }
+//
+//   test("update carbon table[using destination table (concat) where and exist]") {
+//     sql("""drop table iud.dest140""")
+//     sql("""create table iud.dest140 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest140""")
+//     sql("""update iud.dest140 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))  where d.c1 = 'a'""").show()
+//     checkAnswer(
+//       sql("""select c2,c5 from iud.dest140 where c1 = 'a'"""),
+//       Seq(Row(2,"aaaz"))
+//     )
+//     sql("""drop table iud.dest140""")
+//   }
+//
+//   test("update carbon table[using destination table (concat) with  where") {
+//     sql("""drop table iud.dest150""")
+//     sql("""create table iud.dest150 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest150""")
+//     sql("""update iud.dest150 d set (c5) = (concat(c5 , "z"))  where d.c1 = 'b'""").show()
+//     checkAnswer(
+//       sql("""select c5 from iud.dest150 where c1 = 'b' """),
+//       Seq(Row("bbbz"))
+//     )
+//     sql("""drop table iud.dest150""")
+//   }
+//
+//  test("update table with data for datatype mismatch with column ") {
+//    sql("""update iud.update_01 set (imei) = ('skt') where level = 'aaa'""")
+//    checkAnswer(
+//      sql("""select * from iud.update_01 where imei = 'skt'"""),
+//      Seq()
+//    )
+//  }
+//
+//   test("update carbon table-error[more columns in source table not allowed") {
+//     val exception = intercept[Exception] {
+//       sql("""update iud.dest d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"), "abc")""").show()
+//     }
+//     assertResult("Number of source and destination columns are not matching")(exception.getMessage)
+//   }
+//
+//   test("update carbon table-error[no set columns") {
+//     intercept[Exception] {
+//       sql("""update iud.dest d set () = ()""").show()
+//     }
+//   }
+//
+//   test("update carbon table-error[no set columns with updated column") {
+//     intercept[Exception] {
+//       sql("""update iud.dest d set  = (c1+1)""").show()
+//     }
+//   }
+//   test("update carbon table-error[one set column with two updated column") {
+//     intercept[Exception] {
+//       sql("""update iud.dest  set c2 = (c2 + 1, concat(c5 , "z") )""").show()
+//     }
+//   }
+//
+// test("""update carbon [special characters  in value- test parsing logic ]""") {
+//    sql("""drop table iud.dest160""")
+//    sql("""create table iud.dest160 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest160""")
+//    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
+//    sql("""update iud.dest160 set(c1) =  ('abd$asjdh$adasj$l;sdf$*)$*)(&^')""").show()
+//    sql("""update iud.dest160 set(c1) =("\\")""").show()
+//    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
+//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'a\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""update iud.dest160 d set (c3,c5)      =     (select s.c33,'a\\a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""update iud.dest160 d set (c3,c5) =(select s.c33,'a\'a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a\'a\"' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    sql("""drop table iud.dest160""")
+//  }
+//
+//  test("""update carbon [sub query, between and existing in outer condition.(Customer query ) ]""") {
+//    sql("""drop table iud.dest170""")
+//    sql("""create table iud.dest170 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest170""")
+//    sql("""update iud.dest170 d set (c3)=(select s.c33 from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
+//    checkAnswer(
+//      sql("""select c3 from  iud.dest170 as d where d.c2 between 1 and 3"""),
+//      Seq(Row("MGM"), Row("RGK"), Row("cc"))
+//    )
+//    sql("""drop table iud.dest170""")
+//  }
+//
+//  test("""update carbon [self join select query ]""") {
+//    sql("""drop table iud.dest171""")
+//    sql("""create table iud.dest171 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest171""")
+//    sql("""update iud.dest171 d set (c3)=(select concat(s.c3 , "z") from iud.dest171 s where d.c2 = s.c2)""").show
+//    sql("""drop table iud.dest172""")
+//    sql("""create table iud.dest172 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
+//    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest172""")
+//    sql("""update iud.dest172 d set (c3)=( concat(c3 , "z"))""").show
+//    checkAnswer(
+//      sql("""select c3 from  iud.dest171"""),
+//      sql("""select c3 from  iud.dest172""")
+//    )
+//    sql("""drop table iud.dest171""")
+//    sql("""drop table iud.dest172""")
+//  }
+//
+//  test("update carbon table-error[closing bracket missed") {
+//    intercept[Exception] {
+//      sql("""update iud.dest d set (c2) = (194""").show()
+//    }
+//  }
+//
+//  test("update carbon table-error[starting bracket missed") {
+//    intercept[Exception] {
+//      sql("""update iud.dest d set (c2) = 194)""").show()
+//    }
+//  }
+//
+//  test("update carbon table-error[missing starting and closing bracket") {
+//    intercept[Exception] {
+//      sql("""update iud.dest d set (c2) = 194""").show()
+//    }
+//  }
+//
+//  test("test create table with column name as tupleID"){
+//    intercept[Exception] {
+//      sql("CREATE table carbontable (empno int, tupleID String, " +
+//          "designation String, doj Timestamp, workgroupcategory int, " +
+//          "workgroupcategoryname String, deptno int, deptname String, projectcode int, " +
+//          "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
+//          "utilization int,salary int) STORED BY 'org.apache.carbondata.format' " +
+//          "TBLPROPERTIES('DICTIONARY_INCLUDE'='empno,workgroupcategory,deptno,projectcode'," +
+//          "'DICTIONARY_EXCLUDE'='empname')")
+//    }
+//  }
+//
+//  test("Failure of update operation due to bad record with proper error message") {
+//    try {
+//      CarbonProperties.getInstance()
+//        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
+//      val errorMessage = intercept[Exception] {
+//        sql("drop table if exists update_with_bad_record")
+//        sql("create table update_with_bad_record(item int, name String) stored by 'carbondata'")
+//        sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/IUD/bad_record.csv' into table " +
+//            s"update_with_bad_record")
+//        sql("update update_with_bad_record set (item)=(3.45)").show()
+//        sql("drop table if exists update_with_bad_record")
+//      }
+//      assert(errorMessage.getMessage.contains("Data load failed due to bad record"))
+//    } finally {
+//      CarbonProperties.getInstance()
+//        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
+//    }
+//  }
+
+  override def afterAll {
+//    sql("use default")
+//    sql("drop database  if exists iud cascade")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
+  }
+}
\ No newline at end of file
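
A minimal usage sketch (not part of this commit) showing how the UPDATE and DELETE statements exercised by the two test suites above would be issued from a Spark 2.1 application; the getOrCreateCarbonSession bootstrap and the store path are assumptions about the usual CarbonSession setup rather than something shown in this diff:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.CarbonSession._

    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("IUDExample")
      .getOrCreateCarbonSession("/tmp/carbon/store") // assumed store location

    spark.sql("create database if not exists iud_db")
    spark.sql("create table iud_db.dest (c1 string, c2 int, c3 string, c5 string) " +
      "stored by 'carbondata'")

    // Same SET (...) = (...) and WHERE shapes as in the tests above.
    spark.sql("update iud_db.dest d set (d.c2) = (d.c2 + 1) where d.c1 = 'a'")
    spark.sql("delete from iud_db.dest where c1 in ('d', 'e')")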

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index 5b47fcf..6651abe 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql
 
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.logical.{UnaryNode, _}
 import org.apache.spark.sql.execution.datasources.LogicalRelation
@@ -86,6 +87,29 @@ case class ShowLoadsCommand(databaseNameOp: Option[String], table: String, limit
   }
 }
 
+case class ProjectForUpdate(
+    table: UnresolvedRelation,
+    columns: List[String],
+    child: Seq[LogicalPlan] ) extends Command {
+  override def output: Seq[AttributeReference] = Seq.empty
+}
+
+case class UpdateTable(
+    table: UnresolvedRelation,
+    columns: List[String],
+    selectStmt: String,
+    filer: String) extends LogicalPlan {
+  override def children: Seq[LogicalPlan] = Seq.empty
+  override def output: Seq[AttributeReference] = Seq.empty
+}
+
+case class DeleteRecords(
+    statement: String,
+    table: UnresolvedRelation) extends LogicalPlan {
+  override def children: Seq[LogicalPlan] = Seq.empty
+  override def output: Seq[AttributeReference] = Seq.empty
+}
+
 /**
  * Describe formatted for hive table
  */
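
A minimal sketch (not part of this commit) of how the three unresolved plan nodes added above could be constructed, using the constructor signatures shown in the diff; UnresolvedRelation and TableIdentifier are Spark 2.1 catalyst classes, and the statement and filter strings are illustrative placeholders rather than the exact text the Carbon SQL parser produces:

    import org.apache.spark.sql.catalyst.TableIdentifier
    import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation

    val dest = UnresolvedRelation(TableIdentifier("dest", Some("iud_db")))

    // DELETE FROM iud_db.dest WHERE c1 = 'a' could be captured as:
    val deletePlan = DeleteRecords("delete from iud_db.dest where c1 = 'a'", dest)

    // UPDATE iud_db.dest d SET (d.c2) = (d.c2 + 1) WHERE d.c1 = 'a' could be captured as:
    val updatePlan = UpdateTable(
      dest,
      columns = List("c2"),
      selectStmt = "select d.c2 + 1 from iud_db.dest d",
      filer = "d.c1 = 'a'")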

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
new file mode 100644
index 0000000..39d03bb
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
@@ -0,0 +1,857 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command
+
+import java.util
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable.ListBuffer
+
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.{CarbonDatasourceHadoopRelation, CarbonEnv, DataFrame, Dataset, Row, SparkSession, getDB}
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.storage.StorageLevel
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.datastore.impl.FileFactory
+import org.apache.carbondata.core.locks.{CarbonLockFactory, CarbonLockUtil, LockUsage}
+import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.mutate.{CarbonUpdateUtil, DeleteDeltaBlockDetails, SegmentUpdateDetails, TupleIdEnum}
+import org.apache.carbondata.core.mutate.data.RowCountDetailsVO
+import org.apache.carbondata.core.statusmanager.{SegmentStatusManager, SegmentUpdateStatusManager}
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
+import org.apache.carbondata.core.writer.CarbonDeleteDeltaWriterImpl
+import org.apache.carbondata.processing.exception.MultipleMatchingException
+import org.apache.carbondata.processing.merger.{CarbonDataMergerUtil, CarbonDataMergerUtilResult, CompactionType}
+import org.apache.carbondata.spark.DeleteDelataResultImpl
+import org.apache.carbondata.spark.load.FailureCauses
+import org.apache.carbondata.spark.util.QueryPlanUtil
+
+
+/**
+ * IUD update delete and compaction framework.
+ *
+ */
+
+private[sql] case class ProjectForDeleteCommand(
+     plan: LogicalPlan,
+     identifier: Seq[String],
+     timestamp: String) extends RunnableCommand {
+
+  val LOG = LogServiceFactory.getLogService(this.getClass.getName)
+  var horizontalCompactionFailed = false
+
+  override def run(sparkSession: SparkSession): Seq[Row] = {
+    val dataFrame = Dataset.ofRows(sparkSession, plan)
+//    dataFrame.show(truncate = false)
+//    dataFrame.collect().foreach(println)
+    val dataRdd = dataFrame.rdd
+
+    val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .lookupRelation(deleteExecution.getTableIdentifier(identifier))(sparkSession).
+      asInstanceOf[CarbonRelation]
+    val carbonTable = relation.tableMeta.carbonTable
+    val metadataLock = CarbonLockFactory
+      .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
+        LockUsage.METADATA_LOCK)
+    var lockStatus = false
+    try {
+      lockStatus = metadataLock.lockWithRetries()
+      LOG.audit(s" Delete data request has been received " +
+                s"for ${ relation.databaseName }.${ relation.tableName }.")
+      if (lockStatus) {
+        LOG.info("Successfully able to get the table metadata file lock")
+      }
+      else {
+        throw new Exception("Table is locked for deletion. Please try after some time")
+      }
+      val tablePath = CarbonStorePath.getCarbonTablePath(
+        carbonTable.getStorePath,
+        carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier)
+      var executorErrors = new ExecutionErrors(FailureCauses.NONE, "")
+
+        // handle the clean up of IUD.
+        CarbonUpdateUtil.cleanUpDeltaFiles(carbonTable, false)
+
+          if (deleteExecution
+            .deleteDeltaExecution(identifier, sparkSession, dataRdd, timestamp, relation,
+              false, executorErrors)) {
+            // call IUD Compaction.
+            IUDCommon.tryHorizontalCompaction(sparkSession, relation, isUpdateOperation = false)
+          }
+    } catch {
+      case e: HorizontalCompactionException =>
+          LOG.error("Delete operation passed. Exception in Horizontal Compaction." +
+              " Please check logs. " + e.getMessage)
+          CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, e.compactionTimeStamp.toString)
+
+      case e: Exception =>
+        LOG.error("Exception in Delete data operation " + e.getMessage)
+        // ****** start clean up.
+        // In case of failure , clean all related delete delta files
+        CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, timestamp)
+
+        // clean up. Null check is required as for executor error some times message is null
+        if (null != e.getMessage) {
+          sys.error("Delete data operation is failed. " + e.getMessage)
+        }
+        else {
+          sys.error("Delete data operation is failed. Please check logs.")
+        }
+    } finally {
+      if (lockStatus) {
+        CarbonLockUtil.fileUnlock(metadataLock, LockUsage.METADATA_LOCK)
+      }
+    }
+    Seq.empty
+  }
+}
+
+private[sql] case class ProjectForUpdateCommand(
+    plan: LogicalPlan, tableIdentifier: Seq[String]) extends RunnableCommand {
+  val LOGGER = LogServiceFactory.getLogService(ProjectForUpdateCommand.getClass.getName)
+
+  override def run(sparkSession: SparkSession): Seq[Row] = {
+
+
+   //  sqlContext.sparkContext.setLocalProperty(org.apache.spark.sql.execution.SQLExecution
+    //  .EXECUTION_ID_KEY, null)
+    // DataFrame(sqlContext, plan).show(truncate = false)
+    // return Seq.empty
+
+
+    val res = plan find {
+      case relation: LogicalRelation if (relation.relation
+        .isInstanceOf[CarbonDatasourceHadoopRelation]) =>
+        true
+      case _ => false
+    }
+
+    if (!res.isDefined) {
+      return Seq.empty
+    }
+    val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .lookupRelation(deleteExecution.getTableIdentifier(tableIdentifier))(sparkSession).
+      asInstanceOf[CarbonRelation]
+//    val relation = CarbonEnv.get.carbonMetastore
+//      .lookupRelation1(deleteExecution.getTableIdentifier(tableIdentifier))(sqlContext).
+//      asInstanceOf[CarbonRelation]
+    val carbonTable = relation.tableMeta.carbonTable
+    val metadataLock = CarbonLockFactory
+      .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
+        LockUsage.METADATA_LOCK)
+    var lockStatus = false
+    // get the current time stamp which should be same for delete and update.
+    val currentTime = CarbonUpdateUtil.readCurrentTime
+//    var dataFrame: DataFrame = null
+    var dataSet: DataFrame = null
+    val isPersistEnabledUserValue = CarbonProperties.getInstance
+      .getProperty(CarbonCommonConstants.isPersistEnabled,
+        CarbonCommonConstants.defaultValueIsPersistEnabled)
+   var isPersistEnabled = CarbonCommonConstants.defaultValueIsPersistEnabled.toBoolean
+    if (isPersistEnabledUserValue.equalsIgnoreCase("false")) {
+      isPersistEnabled = false
+    }
+    else if (isPersistEnabledUserValue.equalsIgnoreCase("true")) {
+      isPersistEnabled = true
+    }
+    try {
+      lockStatus = metadataLock.lockWithRetries()
+      if (lockStatus) {
+        logInfo("Successfully able to get the table metadata file lock")
+      }
+      else {
+        throw new Exception("Table is locked for updation. Please try after some time")
+      }
+      val tablePath = CarbonStorePath.getCarbonTablePath(
+        carbonTable.getStorePath,
+        carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier)
+        // Get RDD.
+
+      dataSet = if (isPersistEnabled) {
+          Dataset.ofRows(sparkSession, plan).persist(StorageLevel.MEMORY_AND_DISK)
+//          DataFrame(sqlContext, plan)
+//            .persist(StorageLevel.MEMORY_AND_DISK)
+        }
+        else {
+          Dataset.ofRows(sparkSession, plan)
+//          DataFrame(sqlContext, plan)
+        }
+        var executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
+
+
+        // handle the clean up of IUD.
+        CarbonUpdateUtil.cleanUpDeltaFiles(carbonTable, false)
+
+        // do delete operation.
+        deleteExecution.deleteDeltaExecution(tableIdentifier, sparkSession, dataSet.rdd,
+          currentTime + "",
+        relation, isUpdateOperation = true, executionErrors)
+
+        if(executionErrors.failureCauses != FailureCauses.NONE) {
+          throw new Exception(executionErrors.errorMsg)
+        }
+
+        // do update operation.
+        UpdateExecution.performUpdate(dataSet, tableIdentifier, plan,
+          sparkSession, currentTime, executionErrors)
+
+        if(executionErrors.failureCauses != FailureCauses.NONE) {
+          throw new Exception(executionErrors.errorMsg)
+        }
+
+        // Do IUD Compaction.
+        IUDCommon.tryHorizontalCompaction(sparkSession, relation, isUpdateOperation = true)
+    }
+
+    catch {
+      case e: HorizontalCompactionException =>
+        LOGGER.error(
+            "Update operation passed. Exception in Horizontal Compaction. Please check logs." + e)
+        // In case of failure , clean all related delta files
+        CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, e.compactionTimeStamp.toString)
+
+      case e: Exception =>
+        LOGGER.error("Exception in update operation" + e)
+        // ****** start clean up.
+        // In case of failure , clean all related delete delta files
+        CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, currentTime + "")
+
+        // *****end clean up.
+        if (null != e.getMessage) {
+          sys.error("Update operation failed. " + e.getMessage)
+        }
+        if (null != e.getCause && null != e.getCause.getMessage) {
+          sys.error("Update operation failed. " + e.getCause.getMessage)
+        }
+        sys.error("Update operation failed. please check logs.")
+    }
+    finally {
+      if (null != dataSet && isPersistEnabled) {
+        dataSet.unpersist()
+      }
+      if (lockStatus) {
+        CarbonLockUtil.fileUnlock(metadataLock, LockUsage.METADATA_LOCK)
+      }
+    }
+    Seq.empty
+  }
+}
+
+object IUDCommon {
+
+  val LOG = LogServiceFactory.getLogService(this.getClass.getName)
+
+  /**
+   * Performs horizontal compaction. It is invoked after an Update or a Delete
+   * operation completes. When this method is called after an Update statement,
+   * Update Compaction is performed first, followed by Delete Compaction.
+   * When it is called after a Delete statement, only Delete Compaction
+   * is performed.
+   *
+   * @param sparkSession
+   * @param carbonRelation
+   * @param isUpdateOperation
+   */
+  def tryHorizontalCompaction(sparkSession: SparkSession,
+      carbonRelation: CarbonRelation,
+      isUpdateOperation: Boolean): Unit = {
+
+    var ishorizontalCompaction = CarbonDataMergerUtil.isHorizontalCompactionEnabled()
+
+    if (ishorizontalCompaction == false) {
+      return
+    }
+
+    var compactionTypeIUD = CompactionType.IUD_UPDDEL_DELTA_COMPACTION
+    val carbonTable = carbonRelation.tableMeta.carbonTable
+    val (db, table) = (carbonTable.getDatabaseName, carbonTable.getFactTableName)
+    val absTableIdentifier = carbonTable.getAbsoluteTableIdentifier
+    val updateTimeStamp = System.currentTimeMillis()
+    // To make sure that update and delete timestamps are not same,
+    // required to commit to status metadata and cleanup
+    val deleteTimeStamp = updateTimeStamp + 1
+
+    // get the valid segments
+    var segLists = CarbonDataMergerUtil.getValidSegmentList(absTableIdentifier)
+
+    if (segLists == null || segLists.size() == 0) {
+      return
+    }
+
+    // Should avoid reading Table Status file from Disk every time. Better to load it
+    // in-memory at the starting and pass it along the routines. The constructor of
+    // SegmentUpdateStatusManager reads the Table Status File and Table Update Status
+    // file and save the content in segmentDetails and updateDetails respectively.
+    val segmentUpdateStatusManager: SegmentUpdateStatusManager = new SegmentUpdateStatusManager(
+      absTableIdentifier)
+
+    if (isUpdateOperation == true) {
+
+      // This is only update operation, perform only update compaction.
+      compactionTypeIUD = CompactionType.IUD_UPDDEL_DELTA_COMPACTION
+      performUpdateDeltaCompaction(sparkSession,
+        compactionTypeIUD,
+        carbonTable,
+        absTableIdentifier,
+        segmentUpdateStatusManager,
+        updateTimeStamp,
+        segLists)
+    }
+
+    // After Update Compaction perform delete compaction
+    compactionTypeIUD = CompactionType.IUD_DELETE_DELTA_COMPACTION
+    segLists = CarbonDataMergerUtil.getValidSegmentList(absTableIdentifier)
+    if (segLists == null || segLists.size() == 0) {
+      return
+    }
+
+    // Delete Compaction
+    performDeleteDeltaCompaction(sparkSession,
+      compactionTypeIUD,
+      carbonTable,
+      absTableIdentifier,
+      segmentUpdateStatusManager,
+      deleteTimeStamp,
+      segLists)
+  }
+
+  /**
+   * Update Delta Horizontal Compaction.
+   *
+   * @param sparkSession
+   * @param compactionTypeIUD
+   * @param carbonTable
+   * @param absTableIdentifier
+   * @param segLists
+   */
+  private def performUpdateDeltaCompaction(sparkSession: SparkSession,
+      compactionTypeIUD: CompactionType,
+      carbonTable: CarbonTable,
+      absTableIdentifier: AbsoluteTableIdentifier,
+      segmentUpdateStatusManager: SegmentUpdateStatusManager,
+      factTimeStamp: Long,
+      segLists: util.List[String]): Unit = {
+    val db = carbonTable.getDatabaseName
+    val table = carbonTable.getFactTableName
+    // get the valid segments qualified for update compaction.
+    val validSegList = CarbonDataMergerUtil.getSegListIUDCompactionQualified(segLists,
+      absTableIdentifier,
+      segmentUpdateStatusManager,
+      compactionTypeIUD)
+
+    if (validSegList.size() == 0) {
+      return
+    }
+
+    LOG.info(s"Horizontal Update Compaction operation started for [${db}.${table}].")
+    LOG.audit(s"Horizontal Update Compaction operation started for [${db}.${table}].")
+
+    try {
+      // Update Compaction.
+      val altertablemodel = AlterTableModel(Option(carbonTable.getDatabaseName),
+        carbonTable.getFactTableName,
+        Some(segmentUpdateStatusManager),
+        CompactionType.IUD_UPDDEL_DELTA_COMPACTION.toString,
+        Some(factTimeStamp),
+        "")
+
+      AlterTableCompaction(altertablemodel).run(sparkSession)
+    }
+    catch {
+      case e: Exception =>
+        val msg = if (null != e.getMessage) {
+          e.getMessage
+        } else {
+          "Please check logs for more info"
+        }
+        throw new HorizontalCompactionException(
+          s"Horizontal Update Compaction Failed for [${ db }.${ table }]. " + msg, factTimeStamp)
+    }
+    LOG.info(s"Horizontal Update Compaction operation completed for [${ db }.${ table }].")
+    LOG.audit(s"Horizontal Update Compaction operation completed for [${ db }.${ table }].")
+  }
+
+  /**
+   * Delete Delta Horizontal Compaction.
+   *
+   * @param sparkSession
+   * @param compactionTypeIUD
+   * @param carbonTable
+   * @param absTableIdentifier
+   * @param segLists
+   */
+  private def performDeleteDeltaCompaction(sparkSession: SparkSession,
+      compactionTypeIUD: CompactionType,
+      carbonTable: CarbonTable,
+      absTableIdentifier: AbsoluteTableIdentifier,
+      segmentUpdateStatusManager: SegmentUpdateStatusManager,
+      factTimeStamp: Long,
+      segLists: util.List[String]): Unit = {
+
+    val db = carbonTable.getDatabaseName
+    val table = carbonTable.getFactTableName
+    val deletedBlocksList = CarbonDataMergerUtil.getSegListIUDCompactionQualified(segLists,
+      absTableIdentifier,
+      segmentUpdateStatusManager,
+      compactionTypeIUD)
+
+    if (deletedBlocksList.size() == 0) {
+      return
+    }
+
+    LOG.info(s"Horizontal Delete Compaction operation started for [${db}.${table}].")
+    LOG.audit(s"Horizontal Delete Compaction operation started for [${db}.${table}].")
+
+    try {
+
+      // Delete Compaction RDD
+      val rdd1 = sparkSession.sparkContext
+        .parallelize(deletedBlocksList.asScala.toSeq, deletedBlocksList.size())
+
+      val timestamp = factTimeStamp
+      val updateStatusDetails = segmentUpdateStatusManager.getUpdateStatusDetails
+      val result = rdd1.mapPartitions(iter =>
+        new Iterator[Seq[CarbonDataMergerUtilResult]] {
+          override def hasNext: Boolean = iter.hasNext
+
+          override def next(): Seq[CarbonDataMergerUtilResult] = {
+            val segmentAndBlocks = iter.next
+            val segment = segmentAndBlocks.substring(0, segmentAndBlocks.lastIndexOf("/"))
+            val blockName = segmentAndBlocks
+              .substring(segmentAndBlocks.lastIndexOf("/") + 1, segmentAndBlocks.length)
+
+            val result = CarbonDataMergerUtil.compactBlockDeleteDeltaFiles(segment, blockName,
+              absTableIdentifier,
+              updateStatusDetails,
+              timestamp)
+
+            result.asScala.toList
+
+          }
+        }).collect
+
+      val resultList = ListBuffer[CarbonDataMergerUtilResult]()
+      result.foreach(x => {
+        x.foreach(y => {
+          resultList += y
+        })
+      })
+
+      val updateStatus = CarbonDataMergerUtil.updateStatusFile(resultList.toList.asJava,
+        carbonTable,
+        timestamp.toString,
+        segmentUpdateStatusManager)
+      if (updateStatus == false) {
+        LOG.audit(s"Delete Compaction data operation is failed for [${db}.${table}].")
+        LOG.error("Delete Compaction data operation is failed.")
+        throw new HorizontalCompactionException(
+          s"Horizontal Delete Compaction Failed for [${db}.${table}] ." +
+          s" Please check logs for more info.", factTimeStamp)
+      }
+      else {
+        LOG.info(s"Horizontal Delete Compaction operation completed for [${db}.${table}].")
+        LOG.audit(s"Horizontal Delete Compaction operation completed for [${db}.${table}].")
+      }
+    }
+    catch {
+      case e: Exception =>
+        val msg = if (null != e.getMessage) {
+          e.getMessage
+        } else {
+          "Please check logs for more info"
+        }
+        throw new HorizontalCompactionException(
+          s"Horizontal Delete Compaction Failed for [${ db }.${ table }]. " + msg, factTimeStamp)
+    }
+  }
+}
+
+class HorizontalCompactionException(
+    message: String,
+    // required for cleanup
+    val compactionTimeStamp: Long) extends RuntimeException(message) {
+}
+
+object deleteExecution {
+  val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+
+  def getTableIdentifier(tableIdentifier: Seq[String]): TableIdentifier = {
+    if (tableIdentifier.size > 1) {
+      TableIdentifier(tableIdentifier(1), Some(tableIdentifier(0)))
+    } else {
+      TableIdentifier(tableIdentifier(0), None)
+    }
+  }
+
+  def deleteDeltaExecution(identifier: Seq[String],
+                           sparkSession: SparkSession,
+                           dataRdd: RDD[Row],
+                           timestamp: String, relation: CarbonRelation, isUpdateOperation: Boolean,
+                           executorErrors: ExecutionErrors): Boolean = {
+
+    var res: Array[List[(String, (SegmentUpdateDetails, ExecutionErrors))]] = null
+    val tableName = getTableIdentifier(identifier).table
+    val database = getDB.getDatabaseName(getTableIdentifier(identifier).database, sparkSession)
+    val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .lookupRelation(deleteExecution.getTableIdentifier(identifier))(sparkSession).
+      asInstanceOf[CarbonRelation]
+
+    val storeLocation = relation.tableMeta.storePath
+    val absoluteTableIdentifier: AbsoluteTableIdentifier = new
+        AbsoluteTableIdentifier(storeLocation,
+          relation.tableMeta.carbonTableIdentifier)
+    var tablePath = CarbonStorePath
+      .getCarbonTablePath(storeLocation,
+        absoluteTableIdentifier.getCarbonTableIdentifier())
+    var tableUpdateStatusPath = tablePath.getTableUpdateStatusFilePath
+    val totalSegments =
+      SegmentStatusManager.readLoadMetadata(tablePath.getMetadataDirectoryPath).length
+    var factPath = tablePath.getFactDir
+
+    var carbonTable = relation.tableMeta.carbonTable
+    var deleteStatus = true
+    val deleteRdd = if (isUpdateOperation) {
+      val schema =
+        org.apache.spark.sql.types.StructType(Seq(org.apache.spark.sql.types.StructField(
+          CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID,
+          org.apache.spark.sql.types.StringType)))
+      val rdd = dataRdd
+        .map(row => Row(row.get(row.fieldIndex(
+          CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID))))
+      sparkSession.createDataFrame(rdd, schema).rdd
+      // sqlContext.createDataFrame(rdd, schema).rdd
+    } else {
+      dataRdd
+    }
+
+    val (carbonInputFormat, job) =
+      QueryPlanUtil.createCarbonInputFormat(absoluteTableIdentifier)
+
+    val keyRdd = deleteRdd.map({ row =>
+      val tupleId: String = row
+        .getString(row.fieldIndex(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID))
+      val key = CarbonUpdateUtil.getSegmentWithBlockFromTID(tupleId)
+      (key, row)
+    }).groupByKey()
+
+    // if no loads are present then no need to do anything.
+    if (keyRdd.partitions.size == 0) {
+      return true
+    }
+
+    var blockMappingVO = carbonInputFormat.getBlockRowCount(job, absoluteTableIdentifier)
+    val segmentUpdateStatusMngr = new SegmentUpdateStatusManager(absoluteTableIdentifier)
+    CarbonUpdateUtil
+      .createBlockDetailsMap(blockMappingVO, segmentUpdateStatusMngr)
+
+    val rowContRdd = sparkSession.sparkContext.parallelize(blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
+      keyRdd.partitions.size)
+
+//    val rowContRdd = sqlContext.sparkContext
+//      .parallelize(blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
+//        keyRdd.partitions.size)
+
+    val rdd = rowContRdd.join(keyRdd)
+
+    // rdd.collect().foreach(println)
+
+    res = rdd.mapPartitionsWithIndex(
+      (index: Int, records: Iterator[((String), (RowCountDetailsVO, Iterable[Row]))]) =>
+        Iterator[List[(String, (SegmentUpdateDetails, ExecutionErrors))]] {
+
+          var result = List[(String, (SegmentUpdateDetails, ExecutionErrors))]()
+          while (records.hasNext) {
+            val ((key), (rowCountDetailsVO, groupedRows)) = records.next
+            result = result ++
+              deleteDeltaFunc(index,
+                key,
+                groupedRows.toIterator,
+                timestamp,
+                rowCountDetailsVO)
+
+          }
+          result
+        }
+    ).collect()
+
+    // if no loads are present then no need to do anything.
+    if (res.isEmpty) {
+      return true
+    }
+
+    // update new status file
+    checkAndUpdateStatusFiles
+
+    // all or none: update the status file only if the complete delete operation is successful.
+    def checkAndUpdateStatusFiles: Unit = {
+      val blockUpdateDetailsList = new util.ArrayList[SegmentUpdateDetails]()
+      val segmentDetails = new util.HashSet[String]()
+      res.foreach(resultOfSeg => resultOfSeg.foreach(
+        resultOfBlock => {
+          if (resultOfBlock._1.equalsIgnoreCase(CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS)) {
+            blockUpdateDetailsList.add(resultOfBlock._2._1)
+            segmentDetails.add(resultOfBlock._2._1.getSegmentName)
+            // if this block is invalid then decrement block count in map.
+            if (CarbonUpdateUtil.isBlockInvalid(resultOfBlock._2._1.getStatus)) {
+              CarbonUpdateUtil.decrementDeletedBlockCount(resultOfBlock._2._1,
+                blockMappingVO.getSegmentNumberOfBlockMapping)
+            }
+          }
+          else {
+            deleteStatus = false
+            // In case of failure , clean all related delete delta files
+            CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, timestamp)
+            LOGGER.audit(s"Delete data operation is failed for ${ database }.${ tableName }")
+            val errorMsg =
+              "Delete data operation is failed due to failure in creating delete delta file for " +
+                "segment : " + resultOfBlock._2._1.getSegmentName + " block : " +
+                resultOfBlock._2._1.getBlockName
+            executorErrors.failureCauses = resultOfBlock._2._2.failureCauses
+            executorErrors.errorMsg = resultOfBlock._2._2.errorMsg
+
+            if (executorErrors.failureCauses == FailureCauses.NONE) {
+              executorErrors.failureCauses = FailureCauses.EXECUTOR_FAILURE
+              executorErrors.errorMsg = errorMsg
+            }
+            LOGGER.error(errorMsg)
+            return
+          }
+        }
+      )
+      )
+
+      val listOfSegmentToBeMarkedDeleted = CarbonUpdateUtil
+        .getListOfSegmentsToMarkDeleted(blockMappingVO.getSegmentNumberOfBlockMapping)
+
+      // this is the delete flow so no need to put the timestamp in the status file.
+      if (CarbonUpdateUtil
+        .updateSegmentStatus(blockUpdateDetailsList, carbonTable, timestamp, false) &&
+        CarbonUpdateUtil
+          .updateTableMetadataStatus(segmentDetails,
+            carbonTable,
+            timestamp,
+            !isUpdateOperation,
+            listOfSegmentToBeMarkedDeleted)
+      ) {
+        LOGGER.info(s"Delete data operation is successful for ${ database }.${ tableName }")
+        LOGGER.audit(s"Delete data operation is successful for ${ database }.${ tableName }")
+      }
+      else {
+        // In case of failure , clean all related delete delta files
+        CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, timestamp)
+
+        val errorMessage = "Delete data operation is failed due to failure " +
+          "in table status updation."
+        LOGGER.audit(s"Delete data operation is failed for ${ database }.${ tableName }")
+        LOGGER.error("Delete data operation is failed due to failure in table status updation.")
+        executorErrors.failureCauses = FailureCauses.STATUS_FILE_UPDATION_FAILURE
+        executorErrors.errorMsg = errorMessage
+        // throw new Exception(errorMessage)
+      }
+    }
+
+    def deleteDeltaFunc(index: Int,
+                        key: String,
+                        iter: Iterator[Row],
+                        timestamp: String,
+                        rowCountDetailsVO: RowCountDetailsVO):
+    Iterator[(String, (SegmentUpdateDetails, ExecutionErrors))] = {
+
+      val result = new DeleteDelataResultImpl()
+      var deleteStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
+      val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+      // here key = segment/blockName
+      val blockName = CarbonUpdateUtil
+        .getBlockName(
+          CarbonTablePath.addDataPartPrefix(key.split(CarbonCommonConstants.FILE_SEPARATOR)(1)))
+      val segmentId = key.split(CarbonCommonConstants.FILE_SEPARATOR)(0)
+      var deleteDeltaBlockDetails: DeleteDeltaBlockDetails = new DeleteDeltaBlockDetails(blockName)
+      val resultIter = new Iterator[(String, (SegmentUpdateDetails, ExecutionErrors))] {
+        val segmentUpdateDetails = new SegmentUpdateDetails()
+        var TID = ""
+        var countOfRows = 0
+        try {
+          while (iter.hasNext) {
+            val oneRow = iter.next
+            TID = oneRow
+              .get(oneRow.fieldIndex(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID)).toString
+            val offset = CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.OFFSET)
+            val blockletId = CarbonUpdateUtil
+              .getRequiredFieldFromTID(TID, TupleIdEnum.BLOCKLET_ID)
+            val IsValidOffset = deleteDeltaBlockDetails.addBlocklet(blockletId, offset)
+            // stop delete operation
+            if(!IsValidOffset) {
+              executorErrors.failureCauses = FailureCauses.MULTIPLE_INPUT_ROWS_MATCHING
+              executorErrors.errorMsg = "Multiple input rows matched for same row."
+              throw new MultipleMatchingException("Multiple input rows matched for same row.")
+            }
+            countOfRows = countOfRows + 1
+          }
+
+          val blockPath = CarbonUpdateUtil.getTableBlockPath(TID, factPath)
+          val completeBlockName = CarbonTablePath
+            .addDataPartPrefix(CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.BLOCK_ID) +
+              CarbonCommonConstants.FACT_FILE_EXT)
+          val deleteDeltaPath = CarbonUpdateUtil
+            .getDeleteDeltaFilePath(blockPath, blockName, timestamp)
+          val carbonDeleteWriter = new CarbonDeleteDeltaWriterImpl(deleteDeltaPath,
+            FileFactory.getFileType(deleteDeltaPath))
+
+          segmentUpdateDetails.setBlockName(blockName)
+          segmentUpdateDetails.setActualBlockName(completeBlockName)
+          segmentUpdateDetails.setSegmentName(segmentId)
+          segmentUpdateDetails.setDeleteDeltaEndTimestamp(timestamp)
+          segmentUpdateDetails.setDeleteDeltaStartTimestamp(timestamp)
+
+          val alreadyDeletedRows: Long = rowCountDetailsVO.getDeletedRowsInBlock
+          val totalDeletedRows: Long = alreadyDeletedRows + countOfRows
+          segmentUpdateDetails.setDeletedRowsInBlock(totalDeletedRows.toString)
+          if (totalDeletedRows == rowCountDetailsVO.getTotalNumberOfRows) {
+            segmentUpdateDetails.setStatus(CarbonCommonConstants.MARKED_FOR_DELETE)
+          }
+          else {
+            // write the delta file
+            carbonDeleteWriter.write(deleteDeltaBlockDetails)
+          }
+
+          deleteStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
+        } catch {
+          case e : MultipleMatchingException =>
+            LOGGER.audit(e.getMessage)
+            LOGGER.error(e.getMessage)
+          // don't throw the exception here.
+          case e: Exception =>
+            val errorMsg = s"Delete data operation is failed for ${ database }.${ tableName }."
+            LOGGER.audit(errorMsg)
+            LOGGER.error(errorMsg + e.getMessage)
+            throw e
+        }
+
+
+        var finished = false
+
+        override def hasNext: Boolean = {
+          if (!finished) {
+            finished = true
+            finished
+          }
+          else {
+            !finished
+          }
+        }
+
+        override def next(): (String, (SegmentUpdateDetails, ExecutionErrors)) = {
+          finished = true
+          result.getKey(deleteStatus, (segmentUpdateDetails, executorErrors))
+        }
+      }
+      resultIter
+    }
+    true
+  }
+}
+
+
+
+object UpdateExecution {
+
+  def performUpdate(
+         dataFrame: Dataset[Row],
+         tableIdentifier: Seq[String],
+         plan: LogicalPlan,
+         sparkSession: SparkSession,
+         currentTime: Long,
+         executorErrors: ExecutionErrors): Unit = {
+
+    def isDestinationRelation(relation: CarbonDatasourceHadoopRelation): Boolean = {
+
+      val tableName = relation.absIdentifier.getCarbonTableIdentifier.getTableName
+      val dbName = relation.absIdentifier.getCarbonTableIdentifier.getDatabaseName
+      (tableIdentifier.size > 1 &&
+        tableIdentifier(0) == dbName &&
+        tableIdentifier(1) == tableName) ||
+        (tableIdentifier(0) == tableName)
+    }
+    def getHeader(relation: CarbonDatasourceHadoopRelation, plan: LogicalPlan): String = {
+      var header = ""
+      var found = false
+
+      plan match {
+        case Project(pList, _) if (!found) =>
+          found = true
+          header = pList
+            .filter(field => !field.name
+              .equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID))
+            .map(col => if (col.name.endsWith(CarbonCommonConstants.UPDATED_COL_EXTENSION)) {
+              col.name
+                .substring(0, col.name.lastIndexOf(CarbonCommonConstants.UPDATED_COL_EXTENSION))
+            }
+            else {
+              col.name
+            }).mkString(",")
+      }
+      header
+    }
+    val ex = dataFrame.queryExecution.analyzed
+    val res = ex find {
+      case relation: LogicalRelation if (relation.relation.isInstanceOf[CarbonDatasourceHadoopRelation] &&
+        isDestinationRelation(relation.relation
+          .asInstanceOf[CarbonDatasourceHadoopRelation])) =>
+        true
+      case _ => false
+    }
+    val carbonRelation: CarbonDatasourceHadoopRelation = res match {
+      case Some(relation: LogicalRelation) =>
+        relation.relation.asInstanceOf[CarbonDatasourceHadoopRelation]
+      case _ => sys.error("Update operation: destination Carbon table not found in the query plan")
+    }
+
+    val updateTableModel = UpdateTableModel(true, currentTime, executorErrors)
+
+    val header = getHeader(carbonRelation, plan)
+
+    LoadTable(
+      Some(carbonRelation.absIdentifier.getCarbonTableIdentifier.getDatabaseName),
+      carbonRelation.absIdentifier.getCarbonTableIdentifier.getTableName,
+      null,
+      Seq(),
+      Map(("fileheader" -> header)),
+      false,
+      null,
+      Some(dataFrame),
+      Some(updateTableModel)).run(sparkSession)
+
+    executorErrors.errorMsg = updateTableModel.executorErrors.errorMsg
+    executorErrors.failureCauses = updateTableModel.executorErrors.failureCauses
+
+    Seq.empty
+
+  }
+
+}
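
The delete flow earlier in this file reduces each deleted row's tuple id to its segment/block prefix, groups the rows per block, and then either writes a delete delta file for the block or, when every row of the block is gone, marks the whole block for delete. The standalone Scala sketch below shows only that grouping decision; the tuple id layout (segmentId/blockId/blockletId/offset) and all names are assumptions made for illustration, not the Carbon API.

// Illustrative only: group deleted tuple ids by their segment/block prefix and
// decide per block whether the whole block can be marked as deleted.
object DeleteGroupingSketch {
  // assumed tuple id layout: segmentId/blockId/blockletId/offset
  def segmentWithBlock(tupleId: String): String =
    tupleId.split("/").take(2).mkString("/")

  def main(args: Array[String]): Unit = {
    val deletedTupleIds = Seq("0/part-0-0/0/3", "0/part-0-0/0/7", "1/part-1-0/0/1")
    val totalRowsPerBlock = Map("0/part-0-0" -> 2L, "1/part-1-0" -> 100L)

    deletedTupleIds.groupBy(segmentWithBlock).foreach { case (block, rows) =>
      val action =
        if (rows.size.toLong == totalRowsPerBlock(block)) "mark block as deleted"
        else "write a delete delta file for the block"
      println(s"$block: ${rows.size} deleted row(s) -> $action")
    }
  }
}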

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
index 6061e3e..7d94c92 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
@@ -18,9 +18,14 @@
 package org.apache.spark.sql.hive
 
 import org.apache.spark.sql._
+import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit
+import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar}
 import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast, NamedExpression}
+import org.apache.spark.sql.catalyst.plans.Inner
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
+import org.apache.spark.sql.execution.command.ProjectForDeleteCommand
+import org.apache.spark.sql.execution.{ProjectExec, SparkSqlParser, SubqueryExec}
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -73,3 +78,136 @@ object CarbonPreInsertionCasts extends Rule[LogicalPlan] {
     }
   }
 }
+
+object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
+
+  var sparkSession: SparkSession = _
+
+  def init(sparkSession: SparkSession) {
+     this.sparkSession = sparkSession
+  }
+
+  private def processUpdateQuery(
+      table: UnresolvedRelation,
+      columns: List[String],
+      selectStmt: String,
+      filter: String): LogicalPlan = {
+    var includedDestColumns = false
+    var includedDestRelation = false
+    var addedTupleId = false
+
+    def prepareTargetRelation(relation: UnresolvedRelation): SubqueryAlias = {
+      val tupleId = UnresolvedAlias(Alias(UnresolvedFunction("getTupleId",
+        Seq.empty, isDistinct = false), "tupleId")())
+      val projList = Seq(
+        UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq))), tupleId)
+      // include tuple id and the rest of the required columns in the subquery
+      SubqueryAlias(table.alias.getOrElse(""), Project(projList, relation), Option(table.tableIdentifier))
+    }
+    // get the un-analyzed logical plan
+    val targetTable = prepareTargetRelation(table)
+    val selectPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan(selectStmt) transform {
+      case Project(projectList, child) if (!includedDestColumns) =>
+        includedDestColumns = true
+        if (projectList.size != columns.size) {
+          sys.error("Number of source and destination columns are not matching")
+        }
+        val renamedProjectList = projectList.zip(columns).map{ case(attr, col) =>
+          attr match {
+            case UnresolvedAlias(child22, _) =>
+              UnresolvedAlias(Alias(child22, col + "-updatedColumn")())
+            case UnresolvedAttribute(param) =>
+              UnresolvedAlias(Alias(attr, col + "-updatedColumn")())
+             // UnresolvedAttribute(col + "-updatedColumn")
+//              UnresolvedAlias(Alias(child, col + "-updatedColumn")())
+            case _ => attr
+          }
+        }
+        val list = Seq(
+          UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq)))) ++ renamedProjectList
+        Project(list, child)
+      case Filter(cond, child) if (!includedDestRelation) =>
+        includedDestRelation = true
+        Filter(cond, Join(child, targetTable, Inner, None))
+      case r @ UnresolvedRelation(t, a) if (!includedDestRelation &&
+                                            t != table.tableIdentifier) =>
+        includedDestRelation = true
+        Join(r, targetTable, Inner, None)
+    }
+    val updatedSelectPlan : LogicalPlan = if (!includedDestRelation) {
+      // special case to handle self join queries
+      // Eg. update tableName  SET (column1) = (column1+1)
+      selectPlan transform {
+        case relation: UnresolvedRelation if (table.tableIdentifier == relation.tableIdentifier &&
+                                              addedTupleId == false) =>
+          addedTupleId = true
+          targetTable
+      }
+    } else {
+      selectPlan
+    }
+    val finalPlan = if (filter.length > 0) {
+      val alias = table.alias.getOrElse("")
+      var transformed: Boolean = false
+      // Create a dummy projection to include filter conditions
+      var newPlan: LogicalPlan = null
+      if (table.tableIdentifier.database.isDefined) {
+        newPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan("select * from  " +
+                                                                     table.tableIdentifier.database
+                                                                       .getOrElse("") + "." +
+                                                                     table.tableIdentifier.table +
+                                                                     " " + alias + " " +
+                                                                     filter)
+      }
+      else {
+        newPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan("select * from  " +
+                                                                     table.tableIdentifier.table +
+                                                                     " " + alias + " " +
+                                                                     filter)
+      }
+      newPlan transform {
+        case UnresolvedRelation(t, Some(a)) if (
+          !transformed && t == table.tableIdentifier && a == alias) =>
+          transformed = true
+          // Add the filter condition of update statement  on destination table
+          SubqueryAlias(alias, updatedSelectPlan, Option(table.tableIdentifier))
+      }
+    } else {
+      updatedSelectPlan
+    }
+    val tid = CarbonTableIdentifierImplicit.toTableIdentifier(Seq(table.tableIdentifier.toString()))
+    val tidSeq = Seq(getDB.getDatabaseName(tid.database, sparkSession))
+    val destinationTable = UnresolvedRelation(table.tableIdentifier, table.alias)
+    ProjectForUpdate(destinationTable, columns, Seq(finalPlan))
+  }
+
+  def processDeleteRecordsQuery(selectStmt: String, table: UnresolvedRelation): LogicalPlan = {
+   // val tid = CarbonTableIdentifierImplicit.toTableIdentifier(Seq(table.tableIdentifier.toString()))
+   val tidSeq = Seq(getDB.getDatabaseName(table.tableIdentifier.database, sparkSession),
+     table.tableIdentifier.table)
+    var addedTupleId = false
+    val selectPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan(selectStmt) transform {
+      case relation: UnresolvedRelation if (table.tableIdentifier == relation.tableIdentifier &&
+                                            addedTupleId == false) =>
+        addedTupleId = true
+        val tupleId = UnresolvedAlias(Alias(UnresolvedFunction("getTupleId",
+          Seq.empty, isDistinct = false), "tupleId")())
+        val projList = Seq(
+          UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq))), tupleId)
+        // include tuple id in the subquery
+        Project(projList, relation)
+    }
+    ProjectForDeleteCommand(
+      selectPlan,
+      tidSeq,
+      System.currentTimeMillis().toString)
+  }
+
+  override def apply(logicalplan: LogicalPlan): LogicalPlan = {
+
+    logicalplan transform {
+      case UpdateTable(t, cols, sel, where) => processUpdateQuery(t, cols, sel, where)
+      case DeleteRecords(statement, table) => processDeleteRecordsQuery(statement, table)
+    }
+  }
+}
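
CarbonIUDAnalysisRule above rewrites an UPDATE statement into a select over the destination table: the SET expressions become extra projections renamed with the "-updatedColumn" suffix, and a getTupleId() projection is carried along so the matched rows can be located later. The sketch below shows that idea at the string level only; the generated SQL and names are illustrative and are not what the rule itself produces.

// String-level sketch of the UPDATE rewrite, assuming simple column = expression pairs.
object UpdateRewriteSketch {
  def rewrite(table: String, columns: Seq[String], exprs: Seq[String], where: String): String = {
    require(columns.size == exprs.size, "Number of source and destination columns are not matching")
    val renamed = columns.zip(exprs).map { case (col, e) => s"$e AS `$col-updatedColumn`" }
    s"SELECT $table.*, getTupleId() AS tupleId, ${renamed.mkString(", ")} FROM $table $where"
  }

  def main(args: Array[String]): Unit = {
    // UPDATE t SET (c1) = (c1 + 1) WHERE c2 = 'x'
    println(rewrite("t", Seq("c1"), Seq("c1 + 1"), "WHERE c2 = 'x'"))
  }
}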

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
index 687afc4..e413840 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonSessionState.scala
@@ -67,6 +67,7 @@ class CarbonSessionCatalog(
   lazy val carbonEnv = {
     val env = new CarbonEnv
     env.init(sparkSession)
+    CarbonIUDAnalysisRule.init(sparkSession)
     env
   }
 
@@ -129,6 +130,7 @@ class CarbonSessionState(sparkSession: SparkSession) extends HiveSessionState(sp
         catalog.ParquetConversions ::
         catalog.OrcConversions ::
         CarbonPreInsertionCasts ::
+        CarbonIUDAnalysisRule ::
         AnalyzeCreateTable(sparkSession) ::
         PreprocessTableInsertion(conf) ::
         DataSourceAnalysis(conf) ::

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index d1a0c90..cc27181 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.aggregate._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.execution.command.RunnableCommand
+import org.apache.spark.sql.execution.command.{ProjectForUpdateCommand, RunnableCommand}
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.types.{IntegerType, StringType}
 
@@ -69,7 +69,8 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
         return plan
       }
       LOGGER.info("Starting to optimize plan")
-      val udfTransformedPlan = pushDownUDFToJoinLeftRelation(plan)
+      val iudPlan = processPlan(plan)
+      val udfTransformedPlan = pushDownUDFToJoinLeftRelation(iudPlan)
       val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder("")
       val queryStatistic = new QueryStatistic()
       val result = transformCarbonPlan(udfTransformedPlan, relations)
@@ -113,6 +114,25 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
     output
   }
 
+  private def processPlan(plan: LogicalPlan): LogicalPlan = {
+    plan transform {
+      case ProjectForUpdate(table, cols, Seq(updatePlan)) =>
+        var isTransformed = false
+        val newPlan = updatePlan transform {
+          case Project(pList, child) if (!isTransformed) =>
+            val (dest: Seq[NamedExpression], source: Seq[NamedExpression]) = pList
+              .splitAt(pList.size - cols.size)
+            val diff = cols.diff(dest.map(_.name))
+            if (diff.size > 0) {
+              sys.error(s"Unknown column(s) ${diff.mkString(",")} in table ${table.tableName}")
+            }
+            isTransformed = true
+            Project(dest.filter(a => !cols.contains(a.name)) ++ source, child)
+        }
+        ProjectForUpdateCommand(newPlan, Seq(table.tableIdentifier.toString()))
+    }
+  }
+
   def isOptimized(plan: LogicalPlan): Boolean = {
     plan find {
       case cd: CarbonDictionaryCatalystDecoder => true
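
processPlan above relies on the analyzed projection carrying the destination columns first and the updated expressions last, so splitting at (pList.size - cols.size) separates the two halves and lets unknown columns be rejected. A tiny standalone sketch of that split, with plain strings standing in for NamedExpressions:

object ProjectionSplitSketch {
  def rewriteProjection(pList: Seq[String], cols: Seq[String]): Seq[String] = {
    val (dest, source) = pList.splitAt(pList.size - cols.size)
    val unknown = cols.diff(dest)
    if (unknown.nonEmpty) {
      sys.error(s"Unknown column(s) ${unknown.mkString(",")}")
    }
    // keep the untouched destination columns and append the updated expressions
    dest.filterNot(cols.contains) ++ source
  }

  def main(args: Array[String]): Unit = {
    // an update that sets c2: projection is (c1, c2, c2-updatedColumn)
    println(rewriteProjection(Seq("c1", "c2", "c2-updatedColumn"), Seq("c2")))
    // prints: List(c1, c2-updatedColumn)
  }
}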

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2026970/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index d1a764f..367aab4 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -20,8 +20,10 @@ package org.apache.spark.sql.parser
 import scala.collection.mutable
 import scala.language.implicitConversions
 
-import org.apache.spark.sql.ShowLoadsCommand
-import org.apache.spark.sql.catalyst.CarbonDDLSqlParser
+import org.apache.spark.sql.{DeleteRecords, ShowLoadsCommand, UpdateTable}
+import org.apache.spark.sql.catalyst.{CarbonDDLSqlParser, TableIdentifier}
+import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
+import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.execution.command._
 import org.apache.spark.sql.types.StructField
@@ -61,7 +63,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
   protected lazy val start: Parser[LogicalPlan] = explainPlan | startCommand
 
   protected lazy val startCommand: Parser[LogicalPlan] =
-    loadManagement| showLoads | alterTable | restructure
+    loadManagement| showLoads | alterTable | restructure | updateTable | deleteRecords
 
   protected lazy val loadManagement: Parser[LogicalPlan] =
     deleteLoadsByID | deleteLoadsByLoadDate | cleanFiles | loadDataNew
@@ -78,6 +80,128 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
         AlterTableCompaction(altertablemodel)
     }
 
+  protected lazy val deleteRecords: Parser[LogicalPlan] =
+    (DELETE ~> FROM ~> table) ~ restInput.? <~ opt(";") ^^ {
+      case table ~ rest =>
+        val tableName = getTableName(table.tableIdentifier)
+        val alias = table.alias.getOrElse("")
+        DeleteRecords("select tupleId from " + tableName + " " + alias + rest.getOrElse(""), table)
+    }
+
+  protected lazy val updateTable: Parser[LogicalPlan] =
+    UPDATE ~> table ~
+    (SET ~> "(" ~> repsep(element, ",") <~ ")") ~
+    ("=" ~> restInput) <~ opt(";") ^^ {
+      case tab ~ columns ~ rest =>
+        val (sel, where) = splitQuery(rest)
+        val (selectStmt, relation) =
+          if (!sel.toLowerCase.startsWith("select ")) {
+            if (sel.trim.isEmpty) {
+              sys.error("At least one source column has to be specified ")
+            }
+            // only list of expression are given, need to convert that list of expressions into
+            // select statement on destination table
+            val relation = tab match {
+              case r@UnresolvedRelation(tableIdentifier, alias) =>
+                updateRelation(r, tableIdentifier, alias)
+              case _ => tab
+            }
+            ("select " + sel + " from " + getTableName(relation.tableIdentifier) + " " +
+             relation.alias.get, relation)
+          } else {
+            (sel, updateRelation(tab, tab.tableIdentifier, tab.alias))
+          }
+        UpdateTable(relation, columns, selectStmt, where)
+    }
+
+  private def updateRelation(
+      r: UnresolvedRelation,
+      tableIdentifier: Seq[String],
+      alias: Option[String]): UnresolvedRelation = {
+    alias match {
+      case Some(_) => r
+      case _ =>
+        val tableAlias = tableIdentifier match {
+          case Seq(dbName, tableName) => Some(tableName)
+          case Seq(tableName) => Some(tableName)
+        }
+        UnresolvedRelation(tableIdentifier, tableAlias)
+    }
+  }
+
+  protected lazy val element: Parser[String] =
+    (ident <~ ".").? ~ ident ^^ {
+      case table ~ column => column.toLowerCase
+    }
+
+  protected lazy val table: Parser[UnresolvedRelation] = {
+    rep1sep(attributeName, ".") ~ opt(ident) ^^ {
+      case tableIdent ~ alias => UnresolvedRelation(tableIdent, alias)
+    }
+  }
+
+  private def splitQuery(query: String): (String, String) = {
+    val stack = scala.collection.mutable.Stack[Char]()
+    var foundSingleQuotes = false
+    var foundDoubleQuotes = false
+    var foundEscapeChar = false
+    var ignoreChar = false
+    var stop = false
+    var bracketCount = 0
+    val (selectStatement, where) = query.span {
+      ch => {
+        if (stop) {
+          false
+        } else {
+          ignoreChar = false
+          if (foundEscapeChar && (ch == '\'' || ch == '\"' || ch == '\\')) {
+            foundEscapeChar = false
+            ignoreChar = true
+          }
+          // escaped single or double quotes are not treated as string delimiters
+          if (!ignoreChar) {
+            if (ch == '\\') {
+              foundEscapeChar = true
+            } else if (ch == '\'') {
+              foundSingleQuotes = !foundSingleQuotes
+            } else if (ch == '\"') {
+              foundDoubleQuotes = !foundDoubleQuotes
+            }
+            else if (ch == '(' && !foundSingleQuotes && !foundDoubleQuotes) {
+              bracketCount = bracketCount + 1
+              stack.push(ch)
+            } else if (ch == ')' && !foundSingleQuotes && !foundDoubleQuotes) {
+              bracketCount = bracketCount + 1
+              stack.pop()
+              if (0 == stack.size) {
+                stop = true
+              }
+            }
+          }
+          true
+        }
+      }
+    }
+    if (bracketCount == 0 || bracketCount % 2 != 0) {
+      sys.error("Parsing error, missing bracket ")
+    }
+    val select = selectStatement.trim
+    (select.substring(1, select.length - 1).trim -> where.trim)
+  }
+
+  protected lazy val attributeName: Parser[String] = acceptMatch("attribute name", {
+    case lexical.Identifier(str) => str.toLowerCase
+    case lexical.Keyword(str) if !lexical.delimiters.contains(str) => str.toLowerCase
+  })
+
+  private def getTableName(tableIdentifier: Seq[String]): String = {
+    if (tableIdentifier.size > 1) {
+      tableIdentifier(0) + "." + tableIdentifier(1)
+    } else {
+      tableIdentifier(0)
+    }
+  }
+
 
   protected lazy val loadDataNew: Parser[LogicalPlan] =
     LOAD ~> DATA ~> opt(LOCAL) ~> INPATH ~> stringLit ~ opt(OVERWRITE) ~

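splitQuery above walks the text that follows "=" character by character, tracking quote state and bracket depth so the parenthesised expression list can be separated from the trailing filter. A condensed, standalone version of the same technique (escape handling omitted, names are illustrative):

object SplitQuerySketch {
  def split(rest: String): (String, String) = {
    var depth = 0
    var inQuote = false
    var stop = false
    val (sel, where) = rest.span { ch =>
      if (stop) {
        false
      } else {
        if (ch == '\'') inQuote = !inQuote
        else if (!inQuote && ch == '(') depth += 1
        else if (!inQuote && ch == ')') { depth -= 1; if (depth == 0) stop = true }
        true
      }
    }
    val trimmed = sel.trim
    (trimmed.substring(1, trimmed.length - 1).trim, where.trim)
  }

  def main(args: Array[String]): Unit = {
    // the text after "=" in: UPDATE t SET (c1) = (c1 + 1) WHERE c2 = 'x'
    println(split("(c1 + 1) WHERE c2 = 'x'"))   // (c1 + 1,WHERE c2 = 'x')
  }
}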

[37/42] carbondata git commit: Adding the Pages support in the Delete Method.

Posted by ra...@apache.org.
Adding the Pages support in the Delete Method.

Correcting the size of the vector batch by excluding the filtered rows.

Changing the page id from string to integer.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bbf5dc18
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bbf5dc18
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bbf5dc18

Branch: refs/heads/branch-1.1
Commit: bbf5dc1815e34921b52e9d15c6552e04dcd114d6
Parents: 2c83e02
Author: ravikiran23 <ra...@gmail.com>
Authored: Fri Jun 2 20:31:57 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:26:50 2017 +0530

----------------------------------------------------------------------
 .../BlockletLevelDeleteDeltaDataCache.java      | 28 +++++++++++++-------
 .../core/mutate/DeleteDeltaBlockDetails.java    |  4 +--
 .../core/mutate/DeleteDeltaBlockletDetails.java | 11 ++++++--
 .../carbondata/core/mutate/TupleIdEnum.java     |  3 ++-
 .../data/BlockletDeleteDeltaCacheLoader.java    | 11 +++++---
 .../reader/CarbonDeleteFilesDataReader.java     | 25 ++++++++++-------
 .../impl/DictionaryBasedResultCollector.java    |  3 ++-
 .../DictionaryBasedVectorResultCollector.java   |  7 +++--
 .../collector/impl/RawBasedResultCollector.java |  3 ++-
 ...structureBasedDictionaryResultCollector.java |  3 ++-
 .../RestructureBasedRawResultCollector.java     |  3 ++-
 .../RestructureBasedVectorResultCollector.java  |  3 +++
 .../core/scan/result/AbstractScannedResult.java | 13 ++++++---
 .../SegmentUpdateStatusManager.java             |  2 +-
 .../sql/execution/command/IUDCommands.scala     |  4 ++-
 .../sql/execution/command/IUDCommands.scala     |  4 ++-
 16 files changed, 84 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
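
In effect the delete delta cache moves from one bitmap per blocklet to one bitmap per page, so a deleted-row lookup now needs the page id as well as the row id. A minimal sketch of that shape, using scala.collection.immutable.BitSet as a stand-in for the RoaringBitmap held by the real cache:

import scala.collection.immutable.BitSet

// Illustrative page-level cache: pageId -> set of deleted row ids.
class PageLevelDeleteCacheSketch(deleted: Map[Int, Array[Int]]) {
  private val perPage: Map[Int, BitSet] =
    deleted.map { case (pageId, rows) => pageId -> BitSet(rows: _*) }

  def contains(rowId: Int, pageId: Int): Boolean =
    perPage.get(pageId).exists(_.contains(rowId))
}

object PageLevelDeleteCacheSketchExample {
  def main(args: Array[String]): Unit = {
    val cache = new PageLevelDeleteCacheSketch(Map(0 -> Array(3, 7), 1 -> Array(0)))
    println(cache.contains(7, 0))  // true: row 7 of page 0 is deleted
    println(cache.contains(7, 1))  // false: page 1 only has row 0 deleted
  }
}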


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/cache/update/BlockletLevelDeleteDeltaDataCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/update/BlockletLevelDeleteDeltaDataCache.java b/core/src/main/java/org/apache/carbondata/core/cache/update/BlockletLevelDeleteDeltaDataCache.java
index 5d2e8ce..abad924 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/update/BlockletLevelDeleteDeltaDataCache.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/update/BlockletLevelDeleteDeltaDataCache.java
@@ -17,26 +17,36 @@
 
 package org.apache.carbondata.core.cache.update;
 
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
 import org.roaringbitmap.RoaringBitmap;
 
 /**
  * This class maintains delete delta data cache of each blocklet along with the block timestamp
  */
 public class BlockletLevelDeleteDeltaDataCache {
-  private RoaringBitmap deleteDelataDataCache;
+  private Map<Integer, RoaringBitmap> deleteDelataDataCache =
+      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
   private String timeStamp;
 
-  public BlockletLevelDeleteDeltaDataCache(int[] deleteDeltaFileData, String timeStamp) {
-    deleteDelataDataCache = RoaringBitmap.bitmapOf(deleteDeltaFileData);
+  public BlockletLevelDeleteDeltaDataCache(Map<Integer, Integer[]> deleteDeltaFileData,
+      String timeStamp) {
+    for (Map.Entry<Integer, Integer[]> entry : deleteDeltaFileData.entrySet()) {
+      int[] dest = new int[entry.getValue().length];
+      int i = 0;
+      for (Integer val : entry.getValue()) {
+        dest[i++] = val.intValue();
+      }
+      deleteDelataDataCache.put(entry.getKey(), RoaringBitmap.bitmapOf(dest));
+    }
     this.timeStamp = timeStamp;
   }
 
-  public boolean contains(int key) {
-    return deleteDelataDataCache.contains(key);
-  }
-
-  public int getSize() {
-    return deleteDelataDataCache.getCardinality();
+  public boolean contains(int key, Integer pageId) {
+    return deleteDelataDataCache.get(pageId).contains(key);
   }
 
   public String getCacheTimeStamp() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
index c4e9ea2..0f66d7e 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
@@ -80,8 +80,8 @@ public class DeleteDeltaBlockDetails implements Serializable {
     }
   }
 
-  public boolean addBlocklet(String blockletId, String offset) throws Exception {
-    DeleteDeltaBlockletDetails blocklet = new DeleteDeltaBlockletDetails(blockletId);
+  public boolean addBlocklet(String blockletId, String offset, Integer pageId) throws Exception {
+    DeleteDeltaBlockletDetails blocklet = new DeleteDeltaBlockletDetails(blockletId, pageId);
     try {
       blocklet.addDeletedRow(CarbonUpdateUtil.getIntegerValue(offset));
       return addBlockletDetails(blocklet);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
index 5418211..7df5f22 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockletDetails.java
@@ -31,6 +31,8 @@ public class DeleteDeltaBlockletDetails implements Serializable {
 
   private static final long serialVersionUID = 1206104914911491724L;
   private String id;
+  private Integer pageId;
+
   private Set<Integer> deletedRows;
 
   /**
@@ -39,9 +41,10 @@ public class DeleteDeltaBlockletDetails implements Serializable {
   private static final LogService LOGGER =
       LogServiceFactory.getLogService(DeleteDeltaBlockletDetails.class.getName());
 
-  public DeleteDeltaBlockletDetails(String id) {
+  public DeleteDeltaBlockletDetails(String id, Integer pageId) {
     this.id = id;
     deletedRows = new TreeSet<Integer>();
+    this.pageId = pageId;
   }
 
   public boolean addDeletedRows(Set<Integer> rows) {
@@ -60,6 +63,10 @@ public class DeleteDeltaBlockletDetails implements Serializable {
     this.id = id;
   }
 
+  public Integer getPageId() {
+    return pageId;
+  }
+
   public Set<Integer> getDeletedRows() {
     return deletedRows;
   }
@@ -73,7 +80,7 @@ public class DeleteDeltaBlockletDetails implements Serializable {
     }
 
     DeleteDeltaBlockletDetails that = (DeleteDeltaBlockletDetails) obj;
-    return id.equals(that.id);
+    return id.equals(that.id) && pageId == that.pageId;
   }
 
   @Override public int hashCode() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/mutate/TupleIdEnum.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/TupleIdEnum.java b/core/src/main/java/org/apache/carbondata/core/mutate/TupleIdEnum.java
index 0c1318c..e8c60b3 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/TupleIdEnum.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/TupleIdEnum.java
@@ -24,7 +24,8 @@ public enum TupleIdEnum {
   SEGMENT_ID(1),
   BLOCK_ID(2),
   BLOCKLET_ID(3),
-  OFFSET(4);
+  PAGE_ID(4),
+  OFFSET(5);
 
   private int index;
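
With PAGE_ID inserted before OFFSET, a tuple id now carries the page as its fifth component and the row offset as its sixth. A small illustrative helper follows; the example id, the separator and the index constants are assumptions for this sketch, while the real code goes through CarbonUpdateUtil and TupleIdEnum:

object TupleIdSketch {
  // assumed layout: part/segmentId/blockId/blockletId/pageId/offset
  val PageIdIndex = 4
  val OffsetIndex = 5

  def field(tupleId: String, index: Int): String = tupleId.split("/")(index)

  def main(args: Array[String]): Unit = {
    val tid = "0/0/part-0-0_batchno0-0/0/2/35"
    println(field(tid, PageIdIndex))  // 2
    println(field(tid, OffsetIndex))  // 35
  }
}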
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockletDeleteDeltaCacheLoader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockletDeleteDeltaCacheLoader.java b/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockletDeleteDeltaCacheLoader.java
index 6665c5b..309e486 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockletDeleteDeltaCacheLoader.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/data/BlockletDeleteDeltaCacheLoader.java
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.core.mutate.data;
 
+import java.util.Map;
+
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
@@ -35,8 +37,8 @@ public class BlockletDeleteDeltaCacheLoader implements DeleteDeltaCacheLoaderInt
   private static final LogService LOGGER =
       LogServiceFactory.getLogService(BlockletDeleteDeltaCacheLoader.class.getName());
 
-  public BlockletDeleteDeltaCacheLoader(String blockletID,
-       DataRefNode blockletNode, AbsoluteTableIdentifier absoluteIdentifier) {
+  public BlockletDeleteDeltaCacheLoader(String blockletID, DataRefNode blockletNode,
+      AbsoluteTableIdentifier absoluteIdentifier) {
     this.blockletID = blockletID;
     this.blockletNode = blockletNode;
     this.absoluteIdentifier = absoluteIdentifier;
@@ -49,11 +51,12 @@ public class BlockletDeleteDeltaCacheLoader implements DeleteDeltaCacheLoaderInt
   public void loadDeleteDeltaFileDataToCache() {
     SegmentUpdateStatusManager segmentUpdateStatusManager =
         new SegmentUpdateStatusManager(absoluteIdentifier);
-    int[] deleteDeltaFileData = null;
+    Map<Integer, Integer[]> deleteDeltaFileData = null;
     BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache = null;
     if (null == blockletNode.getDeleteDeltaDataCache()) {
       try {
-        deleteDeltaFileData = segmentUpdateStatusManager.getDeleteDeltaDataFromAllFiles(blockletID);
+        deleteDeltaFileData =
+            segmentUpdateStatusManager.getDeleteDeltaDataFromAllFiles(blockletID);
         deleteDeltaDataCache = new BlockletLevelDeleteDeltaDataCache(deleteDeltaFileData,
             segmentUpdateStatusManager.getTimestampForRefreshCache(blockletID, null));
       } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
index 89219e1..e689566 100644
--- a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDeleteFilesDataReader.java
@@ -19,9 +19,10 @@ package org.apache.carbondata.core.reader;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
-import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -36,8 +37,6 @@ import org.apache.carbondata.core.mutate.DeleteDeltaBlockDetails;
 import org.apache.carbondata.core.mutate.DeleteDeltaBlockletDetails;
 import org.apache.carbondata.core.util.CarbonProperties;
 
-import org.apache.commons.lang.ArrayUtils;
-
 
 /**
  * This class perform the functionality of reading multiple delete delta files
@@ -80,8 +79,8 @@ public class CarbonDeleteFilesDataReader {
    * @return
    * @throws Exception
    */
-  public int[] getDeleteDataFromAllFiles(List<String> deltaFiles, String blockletId)
-      throws Exception {
+  public Map<Integer, Integer[]> getDeleteDataFromAllFiles(List<String> deltaFiles,
+      String blockletId) throws Exception {
 
     List<Future<DeleteDeltaBlockDetails>> taskSubmitList = new ArrayList<>();
     ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
@@ -101,20 +100,26 @@ public class CarbonDeleteFilesDataReader {
       LOGGER.error("Error while reading the delete delta files : " + e.getMessage());
     }
 
-    Set<Integer> result = new TreeSet<Integer>();
+    Map<Integer, Integer[]> pageIdDeleteRowsMap =
+        new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     for (int i = 0; i < taskSubmitList.size(); i++) {
       try {
         List<DeleteDeltaBlockletDetails> blockletDetails =
             taskSubmitList.get(i).get().getBlockletDetails();
-        result.addAll(
-            blockletDetails.get(blockletDetails.indexOf(new DeleteDeltaBlockletDetails(blockletId)))
-                .getDeletedRows());
+        for (DeleteDeltaBlockletDetails eachBlockletDetails : blockletDetails) {
+          Integer pageId = eachBlockletDetails.getPageId();
+          Set<Integer> rows = blockletDetails
+              .get(blockletDetails.indexOf(new DeleteDeltaBlockletDetails(blockletId, pageId)))
+              .getDeletedRows();
+          pageIdDeleteRowsMap.put(pageId, rows.toArray(new Integer[rows.size()]));
+        }
+
       } catch (Throwable e) {
         LOGGER.error(e.getMessage());
         throw new Exception(e.getMessage());
       }
     }
-    return ArrayUtils.toPrimitive(result.toArray(new Integer[result.size()]));
+    return pageIdDeleteRowsMap;
 
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index b784f94..d4d16d0 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -109,7 +109,7 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
         scannedResult.incrementCounter();
       }
       if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId())) {
+          .contains(scannedResult.getCurrentRowId(), scannedResult.getCurrentPageCounter())) {
         continue;
       }
       fillMeasureData(scannedResult, row);
@@ -128,6 +128,7 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
             .equals(queryDimensions[i].getDimension().getColName())) {
           row[order[i]] = DataTypeUtil.getDataBasedOnDataType(
               scannedResult.getBlockletId() + CarbonCommonConstants.FILE_SEPARATOR + scannedResult
+                  .getCurrentPageCounter() + CarbonCommonConstants.FILE_SEPARATOR + scannedResult
                   .getCurrentRowId(), DataType.STRING);
         } else {
           row[order[i]] =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
index 7a8fe06..3203934 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
@@ -144,9 +144,10 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
         return;
       }
       fillColumnVectorDetails(columnarBatch, rowCounter, requiredRows);
-      scannedResult.markFilteredRows(
-          columnarBatch, rowCounter, requiredRows, columnarBatch.getRowCounter());
+      int filteredRows = scannedResult
+          .markFilteredRows(columnarBatch, rowCounter, requiredRows, columnarBatch.getRowCounter());
       scanAndFillResult(scannedResult, columnarBatch, rowCounter, availableRows, requiredRows);
+      columnarBatch.setActualSize(columnarBatch.getActualSize() + requiredRows - filteredRows);
     }
   }
 
@@ -164,8 +165,6 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
       // Or set the row counter.
       scannedResult.setRowCounter(rowCounter + requiredRows);
     }
-    columnarBatch.setActualSize(
-        columnarBatch.getActualSize() + requiredRows - columnarBatch.getRowsFilteredCount());
     columnarBatch.setRowCounter(columnarBatch.getRowCounter() + requiredRows);
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
index 0af4957..478dc8c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RawBasedResultCollector.java
@@ -61,7 +61,8 @@ public class RawBasedResultCollector extends AbstractScannedResultCollector {
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       scanResultAndGetData(scannedResult);
       if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId())) {
+          .contains(scannedResult.getCurrentRowId(),
+              scannedResult.getCurrentPageCounter())) {
         continue;
       }
       prepareRow(scannedResult, listBasedResult, queryMeasures);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
index 71045ff..4fa1494 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedDictionaryResultCollector.java
@@ -81,7 +81,8 @@ public class RestructureBasedDictionaryResultCollector extends DictionaryBasedRe
         scannedResult.incrementCounter();
       }
       if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId())) {
+          .contains(scannedResult.getCurrentRowId(),
+              scannedResult.getCurrentPageCounter())) {
         continue;
       }
       fillMeasureData(scannedResult, row);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
index aa5802d..2de74fa 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedRawResultCollector.java
@@ -159,7 +159,8 @@ public class RestructureBasedRawResultCollector extends RawBasedResultCollector
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       scanResultAndGetData(scannedResult);
       if (null != deleteDeltaDataCache && deleteDeltaDataCache
-          .contains(scannedResult.getCurrentRowId())) {
+          .contains(scannedResult.getCurrentRowId(),
+              scannedResult.getCurrentPageCounter())) {
         continue;
       }
       // re-fill dictionary and no dictionary key arrays for the newly added columns

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
index 3df4541..6f45c47 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/RestructureBasedVectorResultCollector.java
@@ -109,11 +109,14 @@ public class RestructureBasedVectorResultCollector extends DictionaryBasedVector
         return;
       }
       fillColumnVectorDetails(columnarBatch, rowCounter, requiredRows);
+      int filteredRows = scannedResult
+          .markFilteredRows(columnarBatch, rowCounter, requiredRows, columnarBatch.getRowCounter());
       // fill default values for non existing dimensions and measures
       fillDataForNonExistingDimensions();
       fillDataForNonExistingMeasures();
       // fill existing dimensions and measures data
       scanAndFillResult(scannedResult, columnarBatch, rowCounter, availableRows, requiredRows);
+      columnarBatch.setActualSize(columnarBatch.getActualSize() + requiredRows - filteredRows);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
index a1074ea..1dda1aa 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
@@ -284,8 +284,10 @@ public abstract class AbstractScannedResult {
         String data = getBlockletId();
         if (CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID
             .equals(columnVectorInfo.dimension.getColumnName())) {
-          data = data + CarbonCommonConstants.FILE_SEPARATOR +
-              (rowMapping == null ? j : rowMapping[pageCounter][j]);
+          data = data + CarbonCommonConstants.FILE_SEPARATOR + pageCounter
+              + CarbonCommonConstants.FILE_SEPARATOR + (rowMapping == null ?
+              j :
+              rowMapping[pageCounter][j]);
         }
         vector.putBytes(vectorOffset++, offset, data.length(), data.getBytes());
       }
@@ -648,17 +650,20 @@ public abstract class AbstractScannedResult {
    * @param size
    * @param vectorOffset
    */
-  public void markFilteredRows(CarbonColumnarBatch columnarBatch, int startRow, int size,
+  public int markFilteredRows(CarbonColumnarBatch columnarBatch, int startRow, int size,
       int vectorOffset) {
+    int rowsFiltered = 0;
     if (blockletDeleteDeltaCache != null) {
       int len = startRow + size;
       for (int i = startRow; i < len; i++) {
         int rowId = rowMapping != null ? rowMapping[pageCounter][i] : i;
-        if (blockletDeleteDeltaCache.contains(rowId)) {
+        if (blockletDeleteDeltaCache.contains(rowId, pageCounter)) {
           columnarBatch.markFiltered(vectorOffset);
+          rowsFiltered++;
         }
         vectorOffset++;
       }
     }
+    return rowsFiltered;
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
index c822935..6fab563 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
@@ -254,7 +254,7 @@ public class SegmentUpdateStatusManager {
    * @return
    * @throws Exception
    */
-  public int[] getDeleteDeltaDataFromAllFiles(String tupleId) throws Exception {
+  public Map<Integer, Integer[]> getDeleteDeltaDataFromAllFiles(String tupleId) throws Exception {
     List<String> deltaFiles = getDeltaFiles(tupleId, CarbonCommonConstants.DELETE_DELTA_FILE_EXT);
     CarbonDeleteFilesDataReader dataReader = new CarbonDeleteFilesDataReader();
     String blockletId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.BLOCKLET_ID);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
index a439c30..a292cde 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
@@ -689,7 +689,9 @@ object deleteExecution {
             val offset = CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.OFFSET)
             val blockletId = CarbonUpdateUtil
               .getRequiredFieldFromTID(TID, TupleIdEnum.BLOCKLET_ID)
-            val IsValidOffset = deleteDeltaBlockDetails.addBlocklet(blockletId, offset)
+            val pageId = Integer.parseInt(CarbonUpdateUtil
+              .getRequiredFieldFromTID(TID, TupleIdEnum.PAGE_ID))
+            val IsValidOffset = deleteDeltaBlockDetails.addBlocklet(blockletId, offset, pageId)
             // stop delete operation
             if(!IsValidOffset) {
               executorErrors.failureCauses = FailureCauses.MULTIPLE_INPUT_ROWS_MATCHING

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bbf5dc18/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
index 01395ff..0894f23 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
@@ -704,7 +704,9 @@ object deleteExecution {
             val offset = CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.OFFSET)
             val blockletId = CarbonUpdateUtil
               .getRequiredFieldFromTID(TID, TupleIdEnum.BLOCKLET_ID)
-            val IsValidOffset = deleteDeltaBlockDetails.addBlocklet(blockletId, offset)
+            val pageId = Integer.parseInt(CarbonUpdateUtil
+              .getRequiredFieldFromTID(TID, TupleIdEnum.PAGE_ID))
+            val IsValidOffset = deleteDeltaBlockDetails.addBlocklet(blockletId, offset, pageId)
             // stop delete operation
             if(!IsValidOffset) {
               executorErrors.failureCauses = FailureCauses.MULTIPLE_INPUT_ROWS_MATCHING


[02/42] carbondata git commit: [CARBONDATA-888] Added options to include and exclude dictionary columns in dataframe

Posted by ra...@apache.org.
[CARBONDATA-888] Added options to include and exclude dictionary columns in dataframe


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/01048f86
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/01048f86
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/01048f86

Branch: refs/heads/branch-1.1
Commit: 01048f86f3725df0618b52615a7ba0992d61d412
Parents: f9fb244
Author: Sanoj MG <sa...@gmail.com>
Authored: Tue Apr 18 17:22:14 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:43:48 2017 +0530

----------------------------------------------------------------------
 .../testsuite/dataload/TestLoadDataFrame.scala  | 47 +++++++++++++++++++-
 .../spark/CarbonDataFrameWriter.scala           |  5 +++
 .../spark/sql/CarbonDataFrameWriter.scala       | 29 +++---------
 3 files changed, 56 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/01048f86/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
index 2d86497..f50620f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
@@ -27,6 +27,7 @@ import org.scalatest.BeforeAndAfterAll
 class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
   var df: DataFrame = _
   var dataFrame: DataFrame = _
+  var df2: DataFrame = _
 
 
   def buildTestData() = {
@@ -45,6 +46,9 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
       StructField("string", StringType, nullable = false) :: Nil)
 
     dataFrame = sqlContext.createDataFrame(rdd, schema)
+    df2 = sqlContext.sparkContext.parallelize(1 to 1000)
+      .map(x => ("key_" + x, "str_" + x, x, x * 2, x * 3))
+      .toDF("c1", "c2", "c3", "c4", "c5")
   }
 
   def dropTable() = {
@@ -52,7 +56,9 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS carbon2")
     sql("DROP TABLE IF EXISTS carbon3")
     sql("DROP TABLE IF EXISTS carbon4")
-
+    sql("DROP TABLE IF EXISTS carbon5")
+    sql("DROP TABLE IF EXISTS carbon6")
+    sql("DROP TABLE IF EXISTS carbon7")
   }
 
 
@@ -121,6 +127,45 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
       sql("SELECT count(*) FROM carbon2"),Seq(Row(32000)))
   }
 
+  test("test load dataframe with integer columns included in the dictionary"){
+    df2.write
+      .format("carbondata")
+      .option("tableName", "carbon5")
+      .option("compress", "true")
+      .option("dictionary_include","c3,c4")
+      .mode(SaveMode.Overwrite)
+      .save()
+    checkAnswer(
+      sql("select count(*) from carbon5 where c3 > 300"), Row(700)
+    )
+  }
+
+  test("test load dataframe with string column excluded from the dictionary"){
+    df2.write
+      .format("carbondata")
+      .option("tableName", "carbon6")
+      .option("compress", "true")
+      .option("dictionary_exclude","c2")
+      .mode(SaveMode.Overwrite)
+      .save()
+    checkAnswer(
+      sql("select count(*) from carbon6 where c3 > 300"), Row(700)
+    )
+  }
+
+  test("test load dataframe with both dictionary include and exclude specified"){
+    df2.write
+      .format("carbondata")
+      .option("tableName", "carbon7")
+      .option("compress", "true")
+      .option("dictionary_include","c3,c4")
+      .option("dictionary_exclude","c2")
+      .mode(SaveMode.Overwrite)
+      .save()
+    checkAnswer(
+      sql("select count(*) from carbon7 where c3 > 300"), Row(700)
+    )
+  }
 
   override def afterAll {
     dropTable

http://git-wip-us.apache.org/repos/asf/carbondata/blob/01048f86/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
index 917e2ed..36e2440 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
@@ -174,6 +174,10 @@ class CarbonDataFrameWriter(val dataFrame: DataFrame) {
   }
 
   private def makeCreateTableString(schema: StructType, options: CarbonOption): String = {
+    val properties = Map(
+      "DICTIONARY_INCLUDE" -> options.dictionaryInclude,
+      "DICTIONARY_EXCLUDE" -> options.dictionaryExclude
+    ).filter(_._2.isDefined).map(p => s"'${p._1}' = '${p._2.get}'").mkString(",")
     val carbonSchema = schema.map { field =>
       s"${ field.name } ${ convertToCarbonType(field.dataType) }"
     }
@@ -181,6 +185,7 @@ class CarbonDataFrameWriter(val dataFrame: DataFrame) {
           CREATE TABLE IF NOT EXISTS ${options.dbName}.${options.tableName}
           (${ carbonSchema.mkString(", ") })
           STORED BY '${ CarbonContext.datasourceName }'
+          ${ if (properties.nonEmpty) " TBLPROPERTIES (" + properties + ")" else ""}
       """
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/01048f86/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index 4de644a..1054c62 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -161,30 +161,11 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {
     val carbonSchema = schema.map { field =>
       s"${ field.name } ${ convertToCarbonType(field.dataType) }"
     }
-    var property = new StringBuilder
-    property.append(
-      if (options.dictionaryInclude.isDefined) {
-        s"'DICTIONARY_INCLUDE' = '${options.dictionaryInclude.get}' ,"
-      } else {
-        ""
-      }
-    ).append(
-      if (options.dictionaryExclude.isDefined) {
-        s"'DICTIONARY_EXCLUDE' = '${options.dictionaryExclude.get}' ,"
-      } else {
-        ""
-      }
-    ).append(
-      if (options.tableBlockSize.isDefined) {
-        s"'table_blocksize' = '${options.tableBlockSize.get}'"
-      } else {
-        ""
-      }
-    )
-    if (property.nonEmpty && property.charAt(property.length-1) == ',') {
-      property = property.replace(property.length - 1, property.length, "")
-    }
-
+    val property = Map(
+      "DICTIONARY_INCLUDE" -> options.dictionaryInclude,
+      "DICTIONARY_EXCLUDE" -> options.dictionaryExclude,
+      "TABLE_BLOCKSIZE" -> options.tableBlockSize
+    ).filter(_._2.isDefined).map(p => s"'${p._1}' = '${p._2.get}'").mkString(",")
     s"""
        | CREATE TABLE IF NOT EXISTS ${options.dbName}.${options.tableName}
        | (${ carbonSchema.mkString(", ") })


[18/42] carbondata git commit: tupleId does not work with the vector reader in spark2x

Posted by ra...@apache.org.
tupleId does not work with the vector reader in spark2x


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/50da5245
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/50da5245
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/50da5245

Branch: refs/heads/branch-1.1
Commit: 50da52458c0e3868e3358c4d2032c7ffaaca4246
Parents: 3db5584
Author: nareshpr <pr...@gmail.com>
Authored: Thu May 25 00:23:05 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:59 2017 +0530

----------------------------------------------------------------------
 .../DictionaryBasedVectorResultCollector.java   | 13 ++++++++++-
 .../core/scan/result/AbstractScannedResult.java | 24 ++++++++++++++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/50da5245/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
index af617be..91afe77 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java
@@ -49,6 +49,8 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
 
   protected ColumnVectorInfo[] allColumnInfo;
 
+  protected ColumnVectorInfo[] implictColumnInfo;
+
   public DictionaryBasedVectorResultCollector(BlockExecutionInfo blockExecutionInfos) {
     super(blockExecutionInfos);
     // initialize only if the current block is not a restructured block else the initialization
@@ -66,8 +68,15 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
     List<ColumnVectorInfo> dictInfoList = new ArrayList<>();
     List<ColumnVectorInfo> noDictInfoList = new ArrayList<>();
     List<ColumnVectorInfo> complexList = new ArrayList<>();
+    List<ColumnVectorInfo> implictColumnList = new ArrayList<>();
     for (int i = 0; i < queryDimensions.length; i++) {
-      if (!queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      if (queryDimensions[i].getDimension().hasEncoding(Encoding.IMPLICIT)) {
+        ColumnVectorInfo columnVectorInfo = new ColumnVectorInfo();
+        implictColumnList.add(columnVectorInfo);
+        columnVectorInfo.dimension = queryDimensions[i];
+        columnVectorInfo.ordinal = queryDimensions[i].getDimension().getOrdinal();
+        allColumnInfo[queryDimensions[i].getQueryOrder()] = columnVectorInfo;
+      } else if (!queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY)) {
         ColumnVectorInfo columnVectorInfo = new ColumnVectorInfo();
         noDictInfoList.add(columnVectorInfo);
         columnVectorInfo.dimension = queryDimensions[i];
@@ -109,6 +118,7 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
     dictionaryInfo = dictInfoList.toArray(new ColumnVectorInfo[dictInfoList.size()]);
     noDictionaryInfo = noDictInfoList.toArray(new ColumnVectorInfo[noDictInfoList.size()]);
     complexInfo = complexList.toArray(new ColumnVectorInfo[complexList.size()]);
+    implictColumnInfo = implictColumnList.toArray(new ColumnVectorInfo[implictColumnList.size()]);
     Arrays.sort(dictionaryInfo);
     Arrays.sort(complexInfo);
   }
@@ -144,6 +154,7 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC
     scannedResult.fillColumnarNoDictionaryBatch(noDictionaryInfo);
     scannedResult.fillColumnarMeasureBatch(measureColumnInfo, measureInfo.getMeasureOrdinals());
     scannedResult.fillColumnarComplexBatch(complexInfo);
+    scannedResult.fillColumnarImplicitBatch(implictColumnInfo);
     // it means fetched all data out of page so increment the page counter
     if (availableRows == requiredRows) {
       scannedResult.incrementPageCounter();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/50da5245/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
index ac4a85e..e57a290 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/AbstractScannedResult.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -267,6 +268,29 @@ public abstract class AbstractScannedResult {
   }
 
   /**
+   * Fill the column data to vector
+   */
+  public void fillColumnarImplicitBatch(ColumnVectorInfo[] vectorInfo) {
+    int column = 0;
+    for (int i = 0; i < vectorInfo.length; i++) {
+      ColumnVectorInfo columnVectorInfo = vectorInfo[column];
+      CarbonColumnVector vector = columnVectorInfo.vector;
+      int offset = columnVectorInfo.offset;
+      int vectorOffset = columnVectorInfo.vectorOffset;
+      int len = offset + columnVectorInfo.size;
+      for (int j = offset; j < len; j++) {
+        // Considering only String case now as we support only
+        String data = getBlockletId();
+        if (CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID
+            .equals(columnVectorInfo.dimension.getColumnName())) {
+          data = data + CarbonCommonConstants.FILE_SEPARATOR + j;
+        }
+        vector.putBytes(vectorOffset++, offset, data.length(), data.getBytes());
+      }
+    }
+  }
+
+  /**
    * Just increment the counter incase of query only on measures.
    */
   public void incrementCounter() {
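
For reference, the value that fillColumnarImplicitBatch writes per row for the tupleId column is the blocklet id joined to the row position with the file separator. A minimal Scala sketch of that construction (the separator character here is an assumption standing in for CarbonCommonConstants.FILE_SEPARATOR):

    // Illustration of the per-row implicit tupleId value built in the loop above.
    val fileSeparator = "/"   // assumed stand-in for CarbonCommonConstants.FILE_SEPARATOR
    def implicitTupleId(blockletId: String, rowIndex: Int): String =
      blockletId + fileSeparator + rowIndex
    // e.g. implicitTupleId("0", 42) == "0/42"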


[23/42] carbondata git commit: use binarySearch instead of a for loop to improve performance

Posted by ra...@apache.org.
use binarySearch instead of a for loop to improve performance


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/735e4777
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/735e4777
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/735e4777

Branch: refs/heads/branch-1.1
Commit: 735e4777a13cbc815625c477cfd23ca40d008790
Parents: 9d16d50
Author: mayun <si...@163.com>
Authored: Wed May 24 14:04:43 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:06:35 2017 +0530

----------------------------------------------------------------------
 .../executer/ExcludeFilterExecuterImpl.java     | 13 +++-
 .../executer/ExcludeFilterExecuterImplTest.java | 63 ++++++++++++++++++++
 .../executer/IncludeFilterExecuterImplTest.java | 16 ++---
 3 files changed, 82 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/735e4777/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
index 8e7a3c2..7449781 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImpl.java
@@ -141,14 +141,23 @@ public class ExcludeFilterExecuterImpl implements FilterExecuter {
     return bitSet;
   }
 
+  // use binary search to replace for clause
   private BitSet setFilterdIndexToBitSet(FixedLengthDimensionDataChunk dimColumnDataChunk,
       int numerOfRows) {
     BitSet bitSet = new BitSet(numerOfRows);
     bitSet.flip(0, numerOfRows);
     byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int k = 0; k < filterValues.length; k++) {
+    if (filterValues.length > 1) {
       for (int j = 0; j < numerOfRows; j++) {
-        if (dimColumnDataChunk.compareTo(j, filterValues[k]) == 0) {
+        int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
+            dimColumnDataChunk.getChunkData(j));
+        if (index >= 0) {
+          bitSet.flip(j);
+        }
+      }
+    } else if (filterValues.length == 1) {
+      for (int j = 0; j < numerOfRows; j++) {
+        if (dimColumnDataChunk.compareTo(j, filterValues[0]) == 0) {
           bitSet.flip(j);
         }
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/735e4777/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java
new file mode 100644
index 0000000..e3ae42c
--- /dev/null
+++ b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/ExcludeFilterExecuterImplTest.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.scan.filter.executer;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public class ExcludeFilterExecuterImplTest extends IncludeFilterExecuterImplTest {
+
+ @Override public BitSet setFilterdIndexToBitSetNew(DimensionColumnDataChunk dimColumnDataChunk,
+     int numerOfRows, byte[][] filterValues) {
+   BitSet bitSet = new BitSet(numerOfRows);
+   bitSet.flip(0, numerOfRows);
+   // byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+   if (filterValues.length > 1) {
+     for (int j = 0; j < numerOfRows; j++) {
+       int index = CarbonUtil.binarySearch(filterValues, 0, filterValues.length - 1,
+           dimColumnDataChunk.getChunkData(j));
+       if (index >= 0) {
+         bitSet.flip(j);
+       }
+     }
+   } else if (filterValues.length == 1) {
+     for (int j = 0; j < numerOfRows; j++) {
+       if (dimColumnDataChunk.compareTo(j, filterValues[0]) == 0) {
+         bitSet.flip(j);
+       }
+     }
+   }
+   return bitSet;
+ }
+
+ @Override public BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimColumnDataChunk,
+      int numerOfRows, byte[][] filterValues) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    bitSet.flip(0, numerOfRows);
+    // byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    for (int k = 0; k < filterValues.length; k++) {
+      for (int j = 0; j < numerOfRows; j++) {
+        if (dimColumnDataChunk.compareTo(j, filterValues[k]) == 0) {
+          bitSet.flip(j);
+        }
+      }
+    }
+    return bitSet;
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/735e4777/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
index 87b9c2d..404f77f 100644
--- a/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImplTest.java
@@ -36,7 +36,7 @@ public class IncludeFilterExecuterImplTest extends TestCase {
 
   }
 
-  private BitSet setFilterdIndexToBitSetNew(DimensionColumnDataChunk dimensionColumnDataChunk,
+  public BitSet setFilterdIndexToBitSetNew(DimensionColumnDataChunk dimensionColumnDataChunk,
       int numerOfRows, byte[][] filterValues) {
     BitSet bitSet = new BitSet(numerOfRows);
     if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
@@ -60,7 +60,7 @@ public class IncludeFilterExecuterImplTest extends TestCase {
     return bitSet;
   }
 
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk, int numerOfRows,
+  public BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk, int numerOfRows,
       byte[][] filterValues) {
     BitSet bitSet = new BitSet(numerOfRows);
     if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
@@ -99,8 +99,8 @@ public class IncludeFilterExecuterImplTest extends TestCase {
   @Test
   public void testPerformance() {
 
-    // dimension's data number in a blocklet, usually default is 120000
-    int dataChunkSize = 120000; 
+    // dimension's data number in a blocklet, usually default is 32000
+    int dataChunkSize = 32000; 
     //  repeat query times in the test
     int queryTimes = 5;    
     // repeated times for a dictionary value
@@ -122,8 +122,8 @@ public class IncludeFilterExecuterImplTest extends TestCase {
   @Test
   public void testBoundary() {
 
-	// dimension's data number in a blocklet, usually default is 120000
-    int dataChunkSize = 120000; 
+	// dimension's data number in a blocklet, usually default is 32000
+    int dataChunkSize = 32000; 
     //  repeat query times in the test
     int queryTimes = 5;    
     // repeated times for a dictionary value
@@ -268,8 +268,8 @@ public class IncludeFilterExecuterImplTest extends TestCase {
     long start;
     long end;
     
-    // dimension's data number in a blocklet, usually default is 120000
-    int dataChunkSize = 120000; 
+    // dimension's data number in a blocklet, usually default is 32000
+    int dataChunkSize = 32000; 
     //  repeat query times in the test
     int queryTimes = 10000;    
     // repeated times for a dictionary value
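
To make the change above concrete, here is a small self-contained Scala sketch of the same exclude-filter pattern: every row starts selected, and any row whose value binary-searches into the sorted filter keys is flipped out. It uses plain String keys instead of the byte[][] keys and CarbonUtil.binarySearch used in the real code:

    import java.util.BitSet

    // Standard binary search over sorted keys; returns a non-negative index on a hit.
    def binarySearch(sortedKeys: Array[String], value: String): Int = {
      var low = 0
      var high = sortedKeys.length - 1
      while (low <= high) {
        val mid = (low + high) >>> 1
        val cmp = sortedKeys(mid).compareTo(value)
        if (cmp == 0) return mid
        if (cmp < 0) low = mid + 1 else high = mid - 1
      }
      -1
    }

    // Exclude filter: start with all rows selected, drop any row matching a filter key.
    def setFilteredIndexToBitSet(rowValues: Array[String],
        sortedFilterKeys: Array[String]): BitSet = {
      val bitSet = new BitSet(rowValues.length)
      bitSet.flip(0, rowValues.length)
      for (j <- rowValues.indices) {
        if (binarySearch(sortedFilterKeys, rowValues(j)) >= 0) {
          bitSet.flip(j)
        }
      }
      bitSet
    }

With many filter keys this reduces the inner scan from O(rows * keys) comparisons to O(rows * log keys); the single-key case keeps the direct comparison, as in the patch.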


[08/42] carbondata git commit: NullPointerException thrown when multiple users or clients concurrently run show segments or delete segments

Posted by ra...@apache.org.
NullPointerException thrown when multiple users or clients concurrently run show segments or delete segments


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/49e8b000
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/49e8b000
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/49e8b000

Branch: refs/heads/branch-1.1
Commit: 49e8b0009cba18c4fdec335867a19e18900a6c2a
Parents: 93f7f96
Author: nareshpr <pr...@gmail.com>
Authored: Wed May 17 23:38:06 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:45:05 2017 +0530

----------------------------------------------------------------------
 .../carbondata/spark/load/CarbonLoaderUtil.java |  7 -----
 .../carbondata/spark/util/LoadMetadataUtil.java |  7 +----
 .../org/apache/carbondata/api/CarbonStore.scala | 22 ++++++-------
 .../spark/rdd/DataManagementFunc.scala          | 18 ++++++-----
 .../spark/rdd/CarbonDataRDDFactory.scala        |  2 +-
 .../execution/command/carbonTableSchema.scala   | 20 +++++++++---
 .../spark/rdd/CarbonDataRDDFactory.scala        |  2 +-
 .../execution/command/carbonTableSchema.scala   | 33 ++++++++++++++++----
 .../org/apache/spark/util/CleanFiles.scala      |  4 ++-
 .../apache/spark/util/DeleteSegmentByDate.scala |  4 ++-
 .../apache/spark/util/DeleteSegmentById.scala   |  4 ++-
 .../org/apache/spark/util/ShowSegments.scala    |  5 +--
 12 files changed, 79 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java b/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
index 964c536..a4f15d2 100644
--- a/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
+++ b/integration/spark-common/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
@@ -360,13 +360,6 @@ public final class CarbonLoaderUtil {
     return date;
   }
 
-  public static String extractLoadMetadataFileLocation(String dbName, String tableName) {
-    CarbonTable carbonTable =
-        org.apache.carbondata.core.metadata.CarbonMetadata.getInstance()
-            .getCarbonTable(dbName + '_' + tableName);
-    return carbonTable.getMetaDataFilepath();
-  }
-
   public static Dictionary getDictionary(DictionaryColumnUniqueIdentifier columnIdentifier,
       String carbonStorePath) throws IOException {
     Cache<DictionaryColumnUniqueIdentifier, Dictionary> dictCache =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark-common/src/main/java/org/apache/carbondata/spark/util/LoadMetadataUtil.java
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/java/org/apache/carbondata/spark/util/LoadMetadataUtil.java b/integration/spark-common/src/main/java/org/apache/carbondata/spark/util/LoadMetadataUtil.java
index 0eec314..91a9556 100644
--- a/integration/spark-common/src/main/java/org/apache/carbondata/spark/util/LoadMetadataUtil.java
+++ b/integration/spark-common/src/main/java/org/apache/carbondata/spark/util/LoadMetadataUtil.java
@@ -18,8 +18,6 @@
 package org.apache.carbondata.spark.util;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.metadata.CarbonMetadata;
-import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 
@@ -31,10 +29,7 @@ public final class LoadMetadataUtil {
 
   }
 
-  public static boolean isLoadDeletionRequired(String dbName, String tableName) {
-    CarbonTable table = CarbonMetadata.getInstance().getCarbonTable(dbName + '_' + tableName);
-
-    String metaDataLocation = table.getMetaDataFilepath();
+  public static boolean isLoadDeletionRequired(String metaDataLocation) {
     LoadMetadataDetails[] details = SegmentStatusManager.readLoadMetadata(metaDataLocation);
     if (details != null && details.length != 0) {
       for (LoadMetadataDetails oneRow : details) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 8e885a0..45719fc 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.types.TimestampType
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonMetadata}
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.rdd.DataManagementFunc
@@ -39,11 +40,9 @@ object CarbonStore {
   def showSegments(
       dbName: String,
       tableName: String,
-      limit: Option[String]): Seq[Row] = {
-    val tableUniqueName = dbName + "_" + tableName
-    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableUniqueName)
-    val path = carbonTable.getMetaDataFilepath
-    val loadMetadataDetailsArray = SegmentStatusManager.readLoadMetadata(path)
+      limit: Option[String],
+      tableFolderPath: String): Seq[Row] = {
+    val loadMetadataDetailsArray = SegmentStatusManager.readLoadMetadata(tableFolderPath)
     if (loadMetadataDetailsArray.nonEmpty) {
       val parser = new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP)
       var loadMetadataDetailsSortedArray = loadMetadataDetailsArray.sortWith { (l1, l2) =>
@@ -79,10 +78,11 @@ object CarbonStore {
   def cleanFiles(
       dbName: String,
       tableName: String,
-      storePath: String): Unit = {
+      storePath: String,
+      carbonTable: CarbonTable): Unit = {
     LOGGER.audit(s"The clean files request has been received for $dbName.$tableName")
     try {
-      DataManagementFunc.cleanFiles(dbName, tableName, storePath)
+      DataManagementFunc.cleanFiles(dbName, tableName, storePath, carbonTable)
       LOGGER.audit(s"Clean files operation is success for $dbName.$tableName.")
     } catch {
       case ex: Exception =>
@@ -102,12 +102,12 @@ object CarbonStore {
   def deleteLoadById(
       loadids: Seq[String],
       dbName: String,
-      tableName: String): Unit = {
+      tableName: String,
+      carbonTable: CarbonTable): Unit = {
 
     LOGGER.audit(s"Delete segment by Id request has been received for $dbName.$tableName")
     validateLoadIds(loadids)
 
-    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(dbName + '_' + tableName)
     val path = carbonTable.getMetaDataFilepath
 
     try {
@@ -128,11 +128,11 @@ object CarbonStore {
   def deleteLoadByDate(
       timestamp: String,
       dbName: String,
-      tableName: String): Unit = {
+      tableName: String,
+      carbonTable: CarbonTable): Unit = {
     LOGGER.audit(s"Delete segment by Id request has been received for $dbName.$tableName")
 
     val time = validateTimeFormat(timestamp)
-    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(dbName + '_' + tableName)
     val path = carbonTable.getMetaDataFilepath
 
     try {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
index d6cc2e6..8039d24 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/DataManagementFunc.scala
@@ -297,11 +297,10 @@ object DataManagementFunc {
       dbName: String,
       tableName: String,
       storePath: String,
-      isForceDeletion: Boolean): Unit = {
-    if (LoadMetadataUtil.isLoadDeletionRequired(dbName, tableName)) {
-      val loadMetadataFilePath =
-        CarbonLoaderUtil.extractLoadMetadataFileLocation(dbName, tableName)
-      val details = SegmentStatusManager.readLoadMetadata(loadMetadataFilePath)
+      isForceDeletion: Boolean,
+      carbonTable: CarbonTable): Unit = {
+    if (LoadMetadataUtil.isLoadDeletionRequired(carbonTable.getMetaDataFilepath)) {
+      val details = SegmentStatusManager.readLoadMetadata(carbonTable.getMetaDataFilepath)
       val carbonTableStatusLock =
         CarbonLockFactory.getCarbonLockObj(
           new CarbonTableIdentifier(dbName, tableName, ""),
@@ -325,7 +324,8 @@ object DataManagementFunc {
             LOGGER.info("Table status lock has been successfully acquired.")
 
             // read latest table status again.
-            val latestMetadata = SegmentStatusManager.readLoadMetadata(loadMetadataFilePath)
+            val latestMetadata = SegmentStatusManager
+              .readLoadMetadata(carbonTable.getMetaDataFilepath)
 
             // update the metadata details from old to new status.
             val latestStatus = CarbonLoaderUtil
@@ -351,14 +351,16 @@ object DataManagementFunc {
   def cleanFiles(
       dbName: String,
       tableName: String,
-      storePath: String): Unit = {
+      storePath: String,
+      carbonTable: CarbonTable): Unit = {
     val identifier = new CarbonTableIdentifier(dbName, tableName, "")
     val carbonCleanFilesLock =
       CarbonLockFactory.getCarbonLockObj(identifier, LockUsage.CLEAN_FILES_LOCK)
     try {
       if (carbonCleanFilesLock.lockWithRetries()) {
         LOGGER.info("Clean files lock has been successfully acquired.")
-        deleteLoadsAndUpdateMetadata(dbName, tableName, storePath, isForceDeletion = true)
+        deleteLoadsAndUpdateMetadata(dbName, tableName, storePath,
+          isForceDeletion = true, carbonTable)
       } else {
         val errorMsg = "Clean files request is failed for " +
             s"$dbName.$tableName" +

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 8f4727a..f159c61 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -436,7 +436,7 @@ object CarbonDataRDDFactory {
           s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
       // Check if any load need to be deleted before loading new data
       DataManagementFunc.deleteLoadsAndUpdateMetadata(carbonLoadModel.getDatabaseName,
-        carbonLoadModel.getTableName, storePath, isForceDeletion = false)
+        carbonLoadModel.getTableName, storePath, false, carbonTable)
       if (null == carbonLoadModel.getLoadMetadataDetails) {
         CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index ac51fa0..1192e08 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -196,10 +196,13 @@ private[sql] case class DeleteLoadsById(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sqlContext)
+    val carbonTable = CarbonEnv.get.carbonMetastore.lookupRelation1(databaseNameOp,
+      tableName)(sqlContext).asInstanceOf[CarbonRelation].tableMeta.carbonTable
     CarbonStore.deleteLoadById(
       loadids,
       getDB.getDatabaseName(databaseNameOp, sqlContext),
-      tableName
+      tableName,
+      carbonTable
     )
     Seq.empty
 
@@ -225,10 +228,13 @@ private[sql] case class DeleteLoadsByLoadDate(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sqlContext)
+    val carbonTable = CarbonEnv.get.carbonMetastore.lookupRelation1(databaseNameOp,
+      tableName)(sqlContext).asInstanceOf[CarbonRelation].tableMeta.carbonTable
     CarbonStore.deleteLoadByDate(
       loadDate,
       getDB.getDatabaseName(databaseNameOp, sqlContext),
-      tableName
+      tableName,
+      carbonTable
     )
     Seq.empty
 
@@ -743,10 +749,13 @@ private[sql] case class ShowLoads(
 
   override def run(sqlContext: SQLContext): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sqlContext)
+    val carbonTable = CarbonEnv.get.carbonMetastore.lookupRelation1(databaseNameOp,
+      tableName)(sqlContext).asInstanceOf[CarbonRelation].tableMeta.carbonTable
     CarbonStore.showSegments(
       getDB.getDatabaseName(databaseNameOp, sqlContext),
       tableName,
-      limit
+      limit,
+      carbonTable.getMetaDataFilepath
     )
   }
 }
@@ -885,10 +894,13 @@ private[sql] case class CleanFiles(
 
   def run(sqlContext: SQLContext): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sqlContext)
+    val carbonTable = CarbonEnv.get.carbonMetastore.lookupRelation1(databaseNameOp,
+      tableName)(sqlContext).asInstanceOf[CarbonRelation].tableMeta.carbonTable
     CarbonStore.cleanFiles(
       getDB.getDatabaseName(databaseNameOp, sqlContext),
       tableName,
-      sqlContext.asInstanceOf[CarbonContext].storePath
+      sqlContext.asInstanceOf[CarbonContext].storePath,
+      carbonTable
     )
     Seq.empty
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index ede63ec..e8627a1 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -449,7 +449,7 @@ object CarbonDataRDDFactory {
           s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
       // Check if any load need to be deleted before loading new data
       DataManagementFunc.deleteLoadsAndUpdateMetadata(carbonLoadModel.getDatabaseName,
-        carbonLoadModel.getTableName, storePath, isForceDeletion = false)
+        carbonLoadModel.getTableName, storePath, false, carbonTable)
       if (null == carbonLoadModel.getLoadMetadataDetails) {
         CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 6bc9e61..e2405f2 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -205,10 +205,14 @@ case class DeleteLoadsById(
   def run(sparkSession: SparkSession): Seq[Row] = {
 
     Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .getTableFromMetadata(getDB.getDatabaseName(databaseNameOp, sparkSession), tableName)
+      .map(_.carbonTable).getOrElse(null)
     CarbonStore.deleteLoadById(
       loadids,
       getDB.getDatabaseName(databaseNameOp, sparkSession),
-      tableName
+      tableName,
+      carbonTable
     )
     Seq.empty
 
@@ -226,10 +230,14 @@ case class DeleteLoadsByLoadDate(
 
   def run(sparkSession: SparkSession): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .getTableFromMetadata(getDB.getDatabaseName(databaseNameOp, sparkSession), tableName)
+      .map(_.carbonTable).getOrElse(null)
     CarbonStore.deleteLoadByDate(
       loadDate,
       getDB.getDatabaseName(databaseNameOp, sparkSession),
-      tableName
+      tableName,
+      carbonTable
     )
     Seq.empty
   }
@@ -662,10 +670,14 @@ private[sql] case class DeleteLoadByDate(
 
   def run(sparkSession: SparkSession): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .getTableFromMetadata(getDB.getDatabaseName(databaseNameOp, sparkSession), tableName)
+      .map(_.carbonTable).getOrElse(null)
     CarbonStore.deleteLoadByDate(
       loadDate,
       getDB.getDatabaseName(databaseNameOp, sparkSession),
-      tableName
+      tableName,
+      carbonTable
     )
     Seq.empty
   }
@@ -680,12 +692,17 @@ case class CleanFiles(
 
   def run(sparkSession: SparkSession): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
-    val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+    val catalog = CarbonEnv.getInstance(sparkSession).carbonMetastore
+    val relation = catalog
       .lookupRelation(databaseNameOp, tableName)(sparkSession).asInstanceOf[CarbonRelation]
+    val carbonTable = catalog
+      .getTableFromMetadata(getDB.getDatabaseName(databaseNameOp, sparkSession), tableName)
+      .map(_.carbonTable).getOrElse(null)
     CarbonStore.cleanFiles(
       getDB.getDatabaseName(databaseNameOp, sparkSession),
       tableName,
-      relation.asInstanceOf[CarbonRelation].tableMeta.storePath
+      relation.asInstanceOf[CarbonRelation].tableMeta.storePath,
+      carbonTable
     )
     Seq.empty
   }
@@ -699,10 +716,14 @@ case class ShowLoads(
 
   def run(sparkSession: SparkSession): Seq[Row] = {
     Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
+      .getTableFromMetadata(getDB.getDatabaseName(databaseNameOp, sparkSession), tableName)
+      .map(_.carbonTable).getOrElse(null)
     CarbonStore.showSegments(
       getDB.getDatabaseName(databaseNameOp, sparkSession),
       tableName,
-      limit
+      limit,
+      carbonTable.getMetaDataFilepath
     )
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala b/integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala
index b9a6708..74e11f1 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala
@@ -30,7 +30,9 @@ object CleanFiles {
   def cleanFiles(spark: SparkSession, dbName: String, tableName: String,
       storePath: String): Unit = {
     TableAPIUtil.validateTableExists(spark, dbName, tableName)
-    CarbonStore.cleanFiles(dbName, tableName, storePath)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
+      .getTableFromMetadata(dbName, tableName).map(_.carbonTable).getOrElse(null)
+    CarbonStore.cleanFiles(dbName, tableName, storePath, carbonTable)
   }
 
   def main(args: Array[String]): Unit = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentByDate.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentByDate.scala b/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentByDate.scala
index 7815417..3dffb42 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentByDate.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentByDate.scala
@@ -29,7 +29,9 @@ object DeleteSegmentByDate {
   def deleteSegmentByDate(spark: SparkSession, dbName: String, tableName: String,
       dateValue: String): Unit = {
     TableAPIUtil.validateTableExists(spark, dbName, tableName)
-    CarbonStore.deleteLoadByDate(dateValue, dbName, tableName)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
+      .getTableFromMetadata(dbName, tableName).map(_.carbonTable).getOrElse(null)
+    CarbonStore.deleteLoadByDate(dateValue, dbName, tableName, carbonTable)
   }
 
   def main(args: Array[String]): Unit = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala b/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala
index 65b76b2..35afa28 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala
@@ -33,7 +33,9 @@ object DeleteSegmentById {
   def deleteSegmentById(spark: SparkSession, dbName: String, tableName: String,
       segmentIds: Seq[String]): Unit = {
     TableAPIUtil.validateTableExists(spark, dbName, tableName)
-    CarbonStore.deleteLoadById(segmentIds, dbName, tableName)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
+      .getTableFromMetadata(dbName, tableName).map(_.carbonTable).getOrElse(null)
+    CarbonStore.deleteLoadById(segmentIds, dbName, tableName, carbonTable)
   }
 
   def main(args: Array[String]): Unit = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/49e8b000/integration/spark2/src/main/scala/org/apache/spark/util/ShowSegments.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/ShowSegments.scala b/integration/spark2/src/main/scala/org/apache/spark/util/ShowSegments.scala
index d918381..07dfcc1 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/ShowSegments.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/ShowSegments.scala
@@ -29,9 +29,10 @@ object ShowSegments {
 
   def showSegments(spark: SparkSession, dbName: String, tableName: String,
       limit: Option[String]): Seq[Row] = {
-    //val databaseName = dbName.getOrElse(spark.catalog.currentDatabase)
     TableAPIUtil.validateTableExists(spark, dbName, tableName)
-    CarbonStore.showSegments(dbName, tableName, limit)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
+      .getTableFromMetadata(dbName, tableName).map(_.carbonTable).getOrElse(null)
+    CarbonStore.showSegments(dbName, tableName, limit, carbonTable.getMetaDataFilepath)
   }
 
   def showString(rows: Seq[Row]): String = {
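
The ShowSegments change above is representative of the whole patch: each entry point now resolves the CarbonTable once from the session's metastore and passes it (or its metadata path) into CarbonStore, instead of CarbonStore re-reading the shared CarbonMetadata cache, the lookup implicated in the NullPointerException under concurrent multi-client use. A sketch of the resulting calling pattern, assuming the carbondata spark2 integration classes are on the classpath and spark, dbName, tableName and limit are in scope:

    // Resolve the table once in the command layer and hand it to CarbonStore.
    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
      .getTableFromMetadata(dbName, tableName)
      .map(_.carbonTable)
      .getOrElse(null)
    CarbonStore.showSegments(dbName, tableName, limit, carbonTable.getMetaDataFilepath)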


[36/42] carbondata git commit: Test PR988 #1

Posted by ra...@apache.org.
Test PR988 #1

Test PR988 #2

Test PR988 #3


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2c83e022
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2c83e022
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2c83e022

Branch: refs/heads/branch-1.1
Commit: 2c83e022282b593946a510f215e023d2e98cdac2
Parents: fcb2092
Author: chenerlu <ch...@huawei.com>
Authored: Sun Jun 4 15:58:38 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 13:26:40 2017 +0530

----------------------------------------------------------------------
 .../carbondata/core/scan/scanner/impl/FilterScanner.java     | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c83e022/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
index a224687..8f14b85 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
@@ -282,15 +282,15 @@ public class FilterScanner extends AbstractBlockletScanner {
     MeasureColumnDataChunk[][] measureColumnDataChunks =
         new MeasureColumnDataChunk[measureRawColumnChunks.length][indexesGroup.length];
     for (int i = 0; i < dimensionRawColumnChunks.length; i++) {
-      for (int j = 0; j < indexesGroup.length; j++) {
-        if (dimensionRawColumnChunks[i] != null) {
+      if (dimensionRawColumnChunks[i] != null) {
+        for (int j = 0; j < indexesGroup.length; j++) {
           dimensionColumnDataChunks[i][j] = dimensionRawColumnChunks[i].convertToDimColDataChunk(j);
         }
       }
     }
     for (int i = 0; i < measureRawColumnChunks.length; i++) {
-      for (int j = 0; j < indexesGroup.length; j++) {
-        if (measureRawColumnChunks[i] != null) {
+      if (measureRawColumnChunks[i] != null) {
+        for (int j = 0; j < indexesGroup.length; j++) {
           measureColumnDataChunks[i][j] = measureRawColumnChunks[i].convertToMeasureColDataChunk(j);
         }
       }


[05/42] carbondata git commit: Query statistics issue in case of multiple blocklets and blocks

Posted by ra...@apache.org.
Query statistics issue in case of multiple blocklets and blocks


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8c5540d2
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8c5540d2
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8c5540d2

Branch: refs/heads/branch-1.1
Commit: 8c5540d27a43f335b407098aac784ef0ecd58361
Parents: 6f55450
Author: akashrn5 <ak...@gmail.com>
Authored: Wed May 17 15:06:52 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:44:32 2017 +0530

----------------------------------------------------------------------
 .../carbondata/core/datastore/FileHolder.java   |  8 +++
 .../core/datastore/impl/DFSFileHolderImpl.java  | 21 +++++++-
 .../core/datastore/impl/FileHolderImpl.java     | 17 +++++++
 .../AbstractDetailQueryResultIterator.java      | 19 ++++++-
 .../scan/scanner/AbstractBlockletScanner.java   |  7 ---
 .../core/scan/scanner/impl/FilterScanner.java   | 53 +++++++++++++-------
 .../core/stats/QueryStatisticsConstants.java    |  5 +-
 .../core/stats/QueryStatisticsRecorderImpl.java |  8 ++-
 .../carbondata/core/util/BitSetGroup.java       | 13 ++++-
 9 files changed, 122 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java b/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
index 1b972bc..712e116 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
@@ -17,6 +17,7 @@
 
 package org.apache.carbondata.core.datastore;
 
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
@@ -97,4 +98,11 @@ public interface FileHolder {
    * This method will be used to close all the streams currently present in the cache
    */
   void finish() throws IOException;
+
+  void setQueryId(String queryId);
+
+  String getQueryId();
+
+  DataInputStream getDataInputStream(String filePath, long offset) throws IOException;
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
index d14cff7..6e7a55b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
@@ -16,6 +16,8 @@
  */
 package org.apache.carbondata.core.datastore.impl;
 
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
@@ -29,13 +31,15 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-
 public class DFSFileHolderImpl implements FileHolder {
   /**
    * cache to hold filename and its stream
    */
   private Map<String, FSDataInputStream> fileNameAndStreamCache;
 
+  private String queryId;
+
+
   public DFSFileHolderImpl() {
     this.fileNameAndStreamCache =
         new HashMap<String, FSDataInputStream>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
@@ -138,4 +142,19 @@ public class DFSFileHolderImpl implements FileHolder {
     byteBuffer.rewind();
     return byteBuffer;
   }
+
+  @Override public void setQueryId(String queryId) {
+    this.queryId = queryId;
+  }
+
+  @Override public String getQueryId() {
+    return queryId;
+  }
+
+  @Override public DataInputStream getDataInputStream(String filePath, long offset)
+      throws IOException {
+    FSDataInputStream fsDataInputStream = updateCache(filePath);
+    fsDataInputStream.seek(offset);
+    return new DataInputStream(new BufferedInputStream(fsDataInputStream, 1 * 1024 * 1024));
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
index 36b48f5..4471013 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.core.datastore.impl;
 
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -34,6 +36,7 @@ public class FileHolderImpl implements FileHolder {
    * cache to hold filename and its stream
    */
   private Map<String, FileChannel> fileNameAndStreamCache;
+  private String queryId;
 
   /**
    * FileHolderImpl Constructor
@@ -203,4 +206,18 @@ public class FileHolderImpl implements FileHolder {
     return byteBuffer;
   }
 
+  @Override public void setQueryId(String queryId) {
+    this.queryId = queryId;
+  }
+
+  @Override public String getQueryId() {
+    return queryId;
+  }
+
+  @Override public DataInputStream getDataInputStream(String filePath, long offset)
+      throws IOException {
+    FileInputStream stream = new FileInputStream(filePath);
+    stream.skip(offset);
+    return new DataInputStream(new BufferedInputStream(stream));
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
index 4e5681c..a0823af 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -97,6 +97,7 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
     this.blockExecutionInfos = infos;
     this.fileReader = FileFactory.getFileHolder(
         FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
+    this.fileReader.setQueryId(queryModel.getQueryId());
     this.execService = execService;
     intialiseInfos();
     initQueryStatiticsModel();
@@ -146,7 +147,6 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
     if (blockExecutionInfos.size() > 0) {
       BlockExecutionInfo executionInfo = blockExecutionInfos.get(0);
       blockExecutionInfos.remove(executionInfo);
-      queryStatisticsModel.setRecorder(recorder);
       return new DataBlockIteratorImpl(executionInfo, fileReader, batchSize, queryStatisticsModel,
           execService);
     }
@@ -155,24 +155,41 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
 
   protected void initQueryStatiticsModel() {
     this.queryStatisticsModel = new QueryStatisticsModel();
+    this.queryStatisticsModel.setRecorder(recorder);
     QueryStatistic queryStatisticTotalBlocklet = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM, queryStatisticTotalBlocklet);
+    queryStatisticsModel.getRecorder().recordStatistics(queryStatisticTotalBlocklet);
+
     QueryStatistic queryStatisticValidScanBlocklet = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.VALID_SCAN_BLOCKLET_NUM, queryStatisticValidScanBlocklet);
+    queryStatisticsModel.getRecorder().recordStatistics(queryStatisticValidScanBlocklet);
+
     QueryStatistic totalNumberOfPages = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.TOTAL_PAGE_SCANNED, totalNumberOfPages);
+    queryStatisticsModel.getRecorder().recordStatistics(totalNumberOfPages);
+
     QueryStatistic validPages = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.VALID_PAGE_SCANNED, validPages);
+    queryStatisticsModel.getRecorder().recordStatistics(validPages);
+
+    QueryStatistic scannedPages = new QueryStatistic();
+    queryStatisticsModel.getStatisticsTypeAndObjMap()
+        .put(QueryStatisticsConstants.PAGE_SCANNED, scannedPages);
+    queryStatisticsModel.getRecorder().recordStatistics(scannedPages);
+
     QueryStatistic scanTime = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.SCAN_BLOCKlET_TIME, scanTime);
+    queryStatisticsModel.getRecorder().recordStatistics(scanTime);
+
     QueryStatistic readTime = new QueryStatistic();
     queryStatisticsModel.getStatisticsTypeAndObjMap()
         .put(QueryStatisticsConstants.READ_BLOCKlET_TIME, readTime);
+    queryStatisticsModel.getRecorder().recordStatistics(readTime);
   }
 
   public void processNextBatch(CarbonColumnarBatch columnarBatch) {

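After this change the statistics follow a register-once, update-many pattern: every QueryStatistic is put into the model's map and recorded with the recorder during initQueryStatiticsModel(), and the scanners afterwards only bump counts on the already-registered objects. A simplified, self-contained sketch of that pattern; Stat, Recorder and StatsModel below are stand-ins, not CarbonData types:

import java.util.HashMap;
import java.util.Map;

// Hedged sketch of the register-once / update-many statistics pattern used above.
public final class StatsPatternSketch {

  static final class Stat {
    long count;
    void addCount(long delta) { count += delta; }
  }

  static final class Recorder {
    private final Map<String, Stat> recorded = new HashMap<String, Stat>();
    void record(String key, Stat stat) { recorded.put(key, stat); }
    Map<String, Stat> snapshot() { return recorded; }
  }

  static final class StatsModel {
    final Map<String, Stat> byKey = new HashMap<String, Stat>();
    final Recorder recorder = new Recorder();

    void init(String... keys) {
      for (String key : keys) {
        Stat stat = new Stat();
        byKey.put(key, stat);
        recorder.record(key, stat);   // registered exactly once, at init time
      }
    }
  }

  public static void main(String[] args) {
    StatsModel model = new StatsModel();
    model.init("total_blocklets", "scanned_pages");
    // scanners only mutate the already-registered objects afterwards
    model.byKey.get("total_blocklets").addCount(1);
    model.byKey.get("scanned_pages").addCount(32);
    System.out.println(model.recorder.snapshot().get("scanned_pages").count); // prints 32
  }
}
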
http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
index e8bfc74..0fb9782 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/AbstractBlockletScanner.java
@@ -60,26 +60,21 @@ public abstract class AbstractBlockletScanner implements BlockletScanner {
         .get(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM);
     totalBlockletStatistic.addCountStatistic(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM,
         totalBlockletStatistic.getCount() + 1);
-    queryStatisticsModel.getRecorder().recordStatistics(totalBlockletStatistic);
     QueryStatistic validScannedBlockletStatistic = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.VALID_SCAN_BLOCKLET_NUM);
     validScannedBlockletStatistic
         .addCountStatistic(QueryStatisticsConstants.VALID_SCAN_BLOCKLET_NUM,
             validScannedBlockletStatistic.getCount() + 1);
-    queryStatisticsModel.getRecorder().recordStatistics(validScannedBlockletStatistic);
     // adding statistics for valid number of pages
     QueryStatistic validPages = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.VALID_PAGE_SCANNED);
     validPages.addCountStatistic(QueryStatisticsConstants.VALID_PAGE_SCANNED,
         validPages.getCount() + blocksChunkHolder.getDataBlock().numberOfPages());
-    queryStatisticsModel.getRecorder().recordStatistics(validPages);
     // adding statistics for number of pages
     QueryStatistic totalPagesScanned = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.TOTAL_PAGE_SCANNED);
     totalPagesScanned.addCountStatistic(QueryStatisticsConstants.TOTAL_PAGE_SCANNED,
         totalPagesScanned.getCount() + blocksChunkHolder.getDataBlock().numberOfPages());
-    queryStatisticsModel.getRecorder().recordStatistics(totalPagesScanned);
-
     scannedResult.setBlockletId(
         blockExecutionInfo.getBlockId() + CarbonCommonConstants.FILE_SEPARATOR + blocksChunkHolder
             .getDataBlock().nodeNumber());
@@ -132,7 +127,6 @@ public abstract class AbstractBlockletScanner implements BlockletScanner {
         .get(QueryStatisticsConstants.SCAN_BLOCKlET_TIME);
     scanTime.addCountStatistic(QueryStatisticsConstants.SCAN_BLOCKlET_TIME,
         scanTime.getCount() + (System.currentTimeMillis() - startTime));
-    queryStatisticsModel.getRecorder().recordStatistics(scanTime);
     return scannedResult;
   }
 
@@ -151,7 +145,6 @@ public abstract class AbstractBlockletScanner implements BlockletScanner {
         .get(QueryStatisticsConstants.READ_BLOCKlET_TIME);
     readTime.addCountStatistic(QueryStatisticsConstants.READ_BLOCKlET_TIME,
         readTime.getCount() + (System.currentTimeMillis() - startTime));
-    queryStatisticsModel.getRecorder().recordStatistics(readTime);
   }
 
   @Override public AbstractScannedResult createEmptyResult() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
index 86a2e8b..a224687 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/FilterScanner.java
@@ -98,7 +98,6 @@ public class FilterScanner extends AbstractBlockletScanner {
         .get(QueryStatisticsConstants.TOTAL_PAGE_SCANNED);
     totalPagesScanned.addCountStatistic(QueryStatisticsConstants.TOTAL_PAGE_SCANNED,
         totalPagesScanned.getCount() + blocksChunkHolder.getDataBlock().numberOfPages());
-    queryStatisticsModel.getRecorder().recordStatistics(totalPagesScanned);
     // apply min max
     if (isMinMaxEnabled) {
       BitSet bitSet = this.filterExecuter
@@ -121,7 +120,6 @@ public class FilterScanner extends AbstractBlockletScanner {
         .get(QueryStatisticsConstants.READ_BLOCKlET_TIME);
     readTime.addCountStatistic(QueryStatisticsConstants.READ_BLOCKlET_TIME,
         readTime.getCount() + (System.currentTimeMillis() - startTime));
-    queryStatisticsModel.getRecorder().recordStatistics(readTime);
   }
 
   /**
@@ -143,12 +141,26 @@ public class FilterScanner extends AbstractBlockletScanner {
   private AbstractScannedResult fillScannedResult(BlocksChunkHolder blocksChunkHolder)
       throws FilterUnsupportedException, IOException {
     long startTime = System.currentTimeMillis();
+    QueryStatistic totalBlockletStatistic = queryStatisticsModel.getStatisticsTypeAndObjMap()
+        .get(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM);
+    totalBlockletStatistic.addCountStatistic(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM,
+        totalBlockletStatistic.getCount() + 1);
     // apply filter on actual data
     BitSetGroup bitSetGroup = this.filterExecuter.applyFilter(blocksChunkHolder);
     // if indexes is empty then return with empty result
     if (bitSetGroup.isEmpty()) {
       CarbonUtil.freeMemory(blocksChunkHolder.getDimensionRawDataChunk(),
           blocksChunkHolder.getMeasureRawDataChunk());
+
+      QueryStatistic scanTime = queryStatisticsModel.getStatisticsTypeAndObjMap()
+          .get(QueryStatisticsConstants.SCAN_BLOCKlET_TIME);
+      scanTime.addCountStatistic(QueryStatisticsConstants.SCAN_BLOCKlET_TIME,
+          scanTime.getCount() + (System.currentTimeMillis() - startTime));
+
+      QueryStatistic scannedPages = queryStatisticsModel.getStatisticsTypeAndObjMap()
+          .get(QueryStatisticsConstants.PAGE_SCANNED);
+      scannedPages.addCountStatistic(QueryStatisticsConstants.PAGE_SCANNED,
+          scannedPages.getCount() + bitSetGroup.getScannedPages());
       return createEmptyResult();
     }
 
@@ -162,18 +174,15 @@ public class FilterScanner extends AbstractBlockletScanner {
     validScannedBlockletStatistic
         .addCountStatistic(QueryStatisticsConstants.VALID_SCAN_BLOCKLET_NUM,
             validScannedBlockletStatistic.getCount() + 1);
-    queryStatisticsModel.getRecorder().recordStatistics(validScannedBlockletStatistic);
     // adding statistics for valid number of pages
     QueryStatistic validPages = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.VALID_PAGE_SCANNED);
     validPages.addCountStatistic(QueryStatisticsConstants.VALID_PAGE_SCANNED,
         validPages.getCount() + bitSetGroup.getValidPages());
-    queryStatisticsModel.getRecorder().recordStatistics(validPages);
-    QueryStatistic totalBlockletStatistic = queryStatisticsModel.getStatisticsTypeAndObjMap()
-        .get(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM);
-    totalBlockletStatistic.addCountStatistic(QueryStatisticsConstants.TOTAL_BLOCKLET_NUM,
-        totalBlockletStatistic.getCount() + 1);
-    queryStatisticsModel.getRecorder().recordStatistics(totalBlockletStatistic);
+    QueryStatistic scannedPages = queryStatisticsModel.getStatisticsTypeAndObjMap()
+        .get(QueryStatisticsConstants.PAGE_SCANNED);
+    scannedPages.addCountStatistic(QueryStatisticsConstants.PAGE_SCANNED,
+        scannedPages.getCount() + bitSetGroup.getScannedPages());
     int[] rowCount = new int[bitSetGroup.getNumberOfPages()];
     // get the row indexes from bit set
     int[][] indexesGroup = new int[bitSetGroup.getNumberOfPages()][];
@@ -199,8 +208,11 @@ public class FilterScanner extends AbstractBlockletScanner {
     FileHolder fileReader = blocksChunkHolder.getFileReader();
     int[][] allSelectedDimensionBlocksIndexes =
         blockExecutionInfo.getAllSelectedDimensionBlocksIndexes();
+
+    long dimensionReadTime = System.currentTimeMillis();
     DimensionRawColumnChunk[] projectionListDimensionChunk = blocksChunkHolder.getDataBlock()
         .getDimensionChunks(fileReader, allSelectedDimensionBlocksIndexes);
+    dimensionReadTime = System.currentTimeMillis() - dimensionReadTime;
 
     DimensionRawColumnChunk[] dimensionRawColumnChunks =
         new DimensionRawColumnChunk[blockExecutionInfo.getTotalNumberDimensionBlock()];
@@ -216,8 +228,8 @@ public class FilterScanner extends AbstractBlockletScanner {
         dimensionRawColumnChunks[j] = projectionListDimensionChunk[j];
       }
     }
-
-    /*
+    long dimensionReadTime1 = System.currentTimeMillis();
+    /**
      * in case projection if the projected dimension are not loaded in the dimensionColumnDataChunk
      * then loading them
      */
@@ -230,12 +242,15 @@ public class FilterScanner extends AbstractBlockletScanner {
                 .getDimensionChunk(fileReader, projectionListDimensionIndexes[i]);
       }
     }
+    dimensionReadTime += (System.currentTimeMillis() - dimensionReadTime1);
+    dimensionReadTime1 = System.currentTimeMillis();
     MeasureRawColumnChunk[] measureRawColumnChunks =
         new MeasureRawColumnChunk[blockExecutionInfo.getTotalNumberOfMeasureBlock()];
     int[][] allSelectedMeasureBlocksIndexes =
         blockExecutionInfo.getAllSelectedMeasureBlocksIndexes();
     MeasureRawColumnChunk[] projectionListMeasureChunk = blocksChunkHolder.getDataBlock()
         .getMeasureChunks(fileReader, allSelectedMeasureBlocksIndexes);
+    dimensionReadTime += System.currentTimeMillis() - dimensionReadTime1;
     // read the measure chunk blocks which is not present
     for (int i = 0; i < measureRawColumnChunks.length; i++) {
       if (null != blocksChunkHolder.getMeasureRawDataChunk()[i]) {
@@ -248,9 +263,10 @@ public class FilterScanner extends AbstractBlockletScanner {
         measureRawColumnChunks[j] = projectionListMeasureChunk[j];
       }
     }
-    /*
-      in case projection if the projected measure are not loaded in the measureColumnDataChunk
-      then loading them
+    dimensionReadTime1 = System.currentTimeMillis();
+    /**
+     * in case projection if the projected measure are not loaded in the measureColumnDataChunk
+     * then loading them
      */
     int[] projectionListMeasureIndexes = blockExecutionInfo.getProjectionListMeasureIndexes();
     int projectionListMeasureIndexesLength = projectionListMeasureIndexes.length;
@@ -260,6 +276,7 @@ public class FilterScanner extends AbstractBlockletScanner {
             .getMeasureChunk(fileReader, projectionListMeasureIndexes[i]);
       }
     }
+    dimensionReadTime += System.currentTimeMillis() - dimensionReadTime1;
     DimensionColumnDataChunk[][] dimensionColumnDataChunks =
         new DimensionColumnDataChunk[dimensionRawColumnChunks.length][indexesGroup.length];
     MeasureColumnDataChunk[][] measureColumnDataChunks =
@@ -287,9 +304,11 @@ public class FilterScanner extends AbstractBlockletScanner {
     QueryStatistic scanTime = queryStatisticsModel.getStatisticsTypeAndObjMap()
         .get(QueryStatisticsConstants.SCAN_BLOCKlET_TIME);
     scanTime.addCountStatistic(QueryStatisticsConstants.SCAN_BLOCKlET_TIME,
-        scanTime.getCount() + (System.currentTimeMillis() - startTime));
-    queryStatisticsModel.getRecorder().recordStatistics(scanTime);
-
+        scanTime.getCount() + (System.currentTimeMillis() - startTime - dimensionReadTime));
+    QueryStatistic readTime = queryStatisticsModel.getStatisticsTypeAndObjMap()
+        .get(QueryStatisticsConstants.READ_BLOCKlET_TIME);
+    readTime.addCountStatistic(QueryStatisticsConstants.READ_BLOCKlET_TIME,
+        readTime.getCount() + dimensionReadTime);
     return scannedResult;
   }
 }

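The timing change above accumulates the separate dimension and measure I/O phases into dimensionReadTime and subtracts it from the blocklet scan time, so READ_BLOCKlET_TIME and SCAN_BLOCKlET_TIME end up as disjoint buckets. A hedged, stand-alone illustration of that bookkeeping; readChunks() and applyFilter() are placeholders, not CarbonData code:

// Hedged illustration of the scan-vs-read timing split introduced above.
public final class TimingSplitSketch {

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    long readTime = 0;

    long t = System.currentTimeMillis();
    readChunks();                               // first I/O phase
    readTime += System.currentTimeMillis() - t;

    applyFilter();                              // pure CPU work, counted as scan time

    t = System.currentTimeMillis();
    readChunks();                               // second I/O phase
    readTime += System.currentTimeMillis() - t;

    long scanTime = (System.currentTimeMillis() - start) - readTime;
    System.out.println("read=" + readTime + "ms scan=" + scanTime + "ms");
  }

  private static void readChunks() throws InterruptedException { Thread.sleep(5); }
  private static void applyFilter() throws InterruptedException { Thread.sleep(5); }
}
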
http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsConstants.java b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsConstants.java
index c8fa4a1..c2cda7c 100644
--- a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsConstants.java
@@ -24,7 +24,8 @@ public interface QueryStatisticsConstants {
 
   String LOAD_META = "Time taken to load meta data In Driver Side";
 
-  String LOAD_BLOCKS_DRIVER = "Time taken to load the Block(s) In Driver Side";
+  String LOAD_BLOCKS_DRIVER = "Time taken to load the Block(s) In Driver Side "
+      + "with Block count ";
 
   String BLOCK_ALLOCATION = "Total Time taken in block(s) allocation";
 
@@ -55,6 +56,8 @@ public interface QueryStatisticsConstants {
 
   String TOTAL_PAGE_SCANNED = "The number of total page scanned";
 
+  String PAGE_SCANNED = "The number of page scanned";
+
   // clear no-use statistics timeout
   long CLEAR_STATISTICS_TIMEOUT = 60 * 1000 * 1000000L;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsRecorderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsRecorderImpl.java b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsRecorderImpl.java
index 16abaa4..f84a674 100644
--- a/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsRecorderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/stats/QueryStatisticsRecorderImpl.java
@@ -98,6 +98,7 @@ public class QueryStatisticsRecorderImpl implements QueryStatisticsRecorder, Ser
     long valid_pages_blocklet = 0;
     long total_pages = 0;
     long readTime = 0;
+    long scannedPages = 0;
     try {
       for (QueryStatistic statistic : queryStatistics) {
         switch (statistic.getMessage()) {
@@ -134,6 +135,9 @@ public class QueryStatisticsRecorderImpl implements QueryStatisticsRecorder, Ser
           case QueryStatisticsConstants.READ_BLOCKlET_TIME:
             readTime = statistic.getCount();
             break;
+          case QueryStatisticsConstants.PAGE_SCANNED:
+            scannedPages = statistic.getCount();
+            break;
           default:
             break;
         }
@@ -141,7 +145,7 @@ public class QueryStatisticsRecorderImpl implements QueryStatisticsRecorder, Ser
       String headers =
           "task_id,load_blocks_time,load_dictionary_time,carbon_scan_time,carbon_IO_time, "
               + "total_executor_time,scan_blocks_num,total_blocklets,"
-              + "valid_blocklets,total_pages,valid_pages,result_size";
+              + "valid_blocklets,total_pages,scanned_pages,valid_pages,result_size";
       List<String> values = new ArrayList<String>();
       values.add(queryIWthTask);
       values.add(load_blocks_time + "ms");
@@ -153,6 +157,7 @@ public class QueryStatisticsRecorderImpl implements QueryStatisticsRecorder, Ser
       values.add(String.valueOf(total_blocklet));
       values.add(String.valueOf(valid_scan_blocklet));
       values.add(String.valueOf(total_pages));
+      values.add(String.valueOf(scannedPages));
       values.add(String.valueOf(valid_pages_blocklet));
       values.add(String.valueOf(result_size));
       StringBuilder tableInfo = new StringBuilder();
@@ -174,6 +179,7 @@ public class QueryStatisticsRecorderImpl implements QueryStatisticsRecorder, Ser
       tableInfo.append(line).append("+").append("\n");
       return "Print query statistic for each task id:" + "\n" + tableInfo.toString();
     } catch (Exception ex) {
+      LOGGER.error(ex);
       return "Put statistics into table failed, catch exception: " + ex.getMessage();
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8c5540d2/core/src/main/java/org/apache/carbondata/core/util/BitSetGroup.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/BitSetGroup.java b/core/src/main/java/org/apache/carbondata/core/util/BitSetGroup.java
index 87cbe77..df2d788 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/BitSetGroup.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/BitSetGroup.java
@@ -87,7 +87,18 @@ public class BitSetGroup {
   public int getValidPages() {
     int numberOfPages = 0;
     for (int i = 0; i < bitSets.length; i++) {
-      numberOfPages += (bitSets[i] == null || bitSets[i].isEmpty()) ? 0 : 1;
+      numberOfPages += (bitSets[i] != null && !bitSets[i].isEmpty()) ? 1 : 0;
+    }
+    return numberOfPages;
+  }
+
+  /**
+   * @return the number of scanned pages
+   */
+  public int getScannedPages() {
+    int numberOfPages = 0;
+    for (int i = 0; i < bitSets.length; i++) {
+      numberOfPages += bitSets[i] == null ? 0 : 1;
     }
     return numberOfPages;
   }

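The two counters differ only in how an empty bitset is treated: a page counts as scanned when its bitset is non-null, and as valid only when that bitset also has at least one set bit. A small illustrative check of that distinction; the three-page array below is made-up data, not from the commit:

import java.util.BitSet;

// Hedged sketch of the scanned-vs-valid page counting in BitSetGroup.
public final class PageCountSketch {
  public static void main(String[] args) {
    BitSet matched = new BitSet();
    matched.set(3);

    // page 0: never scanned, page 1: scanned but no matches, page 2: scanned with matches
    BitSet[] pages = { null, new BitSet(), matched };

    int scanned = 0;
    int valid = 0;
    for (BitSet page : pages) {
      if (page != null) {
        scanned++;                       // counted by getScannedPages()
        if (!page.isEmpty()) {
          valid++;                       // counted by getValidPages()
        }
      }
    }
    System.out.println("scanned=" + scanned + " valid=" + valid); // scanned=2 valid=1
  }
}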

[11/42] carbondata git commit: Test Case Mismatch Fix

Posted by ra...@apache.org.
Test Case Mismatch Fix


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/211c23bb
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/211c23bb
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/211c23bb

Branch: refs/heads/branch-1.1
Commit: 211c23bb1c3f213d296d1658cb0c584214025997
Parents: 59d5545
Author: sounakr <so...@gmail.com>
Authored: Fri May 19 14:34:41 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:45:38 2017 +0530

----------------------------------------------------------------------
 .../core/scan/expression/ExpressionResult.java  | 17 +++++++++-
 .../expression/conditional/NotInExpression.java | 34 ++++++++++++++------
 .../RowLevelRangeLessThanFiterExecuterImpl.java | 13 +++++---
 3 files changed, 48 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/211c23bb/core/src/main/java/org/apache/carbondata/core/scan/expression/ExpressionResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/ExpressionResult.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/ExpressionResult.java
index e61ab3a..8a0cbe3 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/ExpressionResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/ExpressionResult.java
@@ -470,8 +470,23 @@ public class ExpressionResult implements Comparable<ExpressionResult> {
     if (this.value == objToCompare.value) {
       return true;
     }
+
+    if (this.isNull() || objToCompare.isNull()) {
+      return false;
+    }
+
+    // make the comparison using the data type with the higher precedence, e.g. LONG has
+    // higher precedence than INT, so the int value is converted to long and then the two
+    // values are compared. If done the other way round an exception will be thrown
+    // and the comparison will fail
+    DataType dataType = null;
+    if (objToCompare.getDataType().getPrecedenceOrder() < this.getDataType().getPrecedenceOrder()) {
+      dataType = this.getDataType();
+    } else {
+      dataType = objToCompare.getDataType();
+    }
     try {
-      switch (this.getDataType()) {
+      switch (dataType) {
         case STRING:
           result = this.getString().equals(objToCompare.getString());
           break;

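The equality fix compares both values through whichever data type has the higher precedence, e.g. an INT operand is widened to LONG before comparing, instead of always using the left-hand type. A hedged stand-alone sketch of the same widening rule; the enum and its precedence numbers are assumptions for illustration:

// Hedged sketch of comparing two typed values against the higher-precedence type.
public final class PrecedenceCompareSketch {

  enum DataType {
    INT(1), LONG(2);
    final int precedence;
    DataType(int precedence) { this.precedence = precedence; }
  }

  static boolean equalsWithWidening(DataType leftType, Number left,
      DataType rightType, Number right) {
    // pick the higher-precedence type so the narrower value is widened, never narrowed
    DataType target = leftType.precedence >= rightType.precedence ? leftType : rightType;
    switch (target) {
      case LONG:
        return left.longValue() == right.longValue();
      case INT:
      default:
        return left.intValue() == right.intValue();
    }
  }

  public static void main(String[] args) {
    // comparing INT 42 with LONG 42 succeeds because both are widened to long
    System.out.println(equalsWithWidening(DataType.INT, 42, DataType.LONG, 42L)); // true
  }
}
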
http://git-wip-us.apache.org/repos/asf/carbondata/blob/211c23bb/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
index 67e3a50..9f385ec 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/conditional/NotInExpression.java
@@ -31,6 +31,7 @@ import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 public class NotInExpression extends BinaryConditionalExpression {
   private static final long serialVersionUID = -6835841923752118034L;
   protected transient Set<ExpressionResult> setOfExprResult;
+  protected transient ExpressionResult nullValuePresent = null;
 
   public NotInExpression(Expression left, Expression right) {
     super(left, right);
@@ -38,23 +39,34 @@ public class NotInExpression extends BinaryConditionalExpression {
 
   @Override public ExpressionResult evaluate(RowIntf value)
       throws FilterUnsupportedException, FilterIllegalMemberException {
+
+    // Both the left and the right result need to be checked for null because NotInExpression is
+    // basically an AND operation over the list of predicates that are provided.
+    // Example: x not in (1,2,null) would be converted to x <> 1 AND x <> 2 AND x <> null.
+    // If any of the predicates is null then the result is unknown for all the predicates, thus
+    // we will return false for each of them.
+    if (nullValuePresent != null) {
+      return nullValuePresent;
+    }
+
     ExpressionResult leftRsult = left.evaluate(value);
+    if (leftRsult.isNull()) {
+      leftRsult.set(DataType.BOOLEAN, false);
+      return leftRsult;
+    }
+
     if (setOfExprResult == null) {
       ExpressionResult val = null;
       ExpressionResult rightRsult = right.evaluate(value);
-      // Both left and right result need to be checked for null because NotInExpression is basically
-      // an And Operation on the list of predicates that are provided.
-      // Example: x in (1,2,null) would be converted to x=1 AND x=2 AND x=null.
-      // If any of the predicates is null then the result is unknown for all the predicates thus
-      // we will return false for each of them.
-      for (ExpressionResult expressionResult: rightRsult.getList()) {
-        if (expressionResult.isNull() || leftRsult.isNull()) {
+      setOfExprResult = new HashSet<ExpressionResult>(10);
+      for (ExpressionResult exprResVal : rightRsult.getList()) {
+
+        if (exprResVal.isNull()) {
+          nullValuePresent = new ExpressionResult(DataType.BOOLEAN, false);
           leftRsult.set(DataType.BOOLEAN, false);
           return leftRsult;
         }
-      }
-      setOfExprResult = new HashSet<ExpressionResult>(10);
-      for (ExpressionResult exprResVal : rightRsult.getList()) {
+
         if (exprResVal.getDataType().getPrecedenceOrder() < leftRsult.getDataType()
             .getPrecedenceOrder()) {
           val = leftRsult;
@@ -88,9 +100,11 @@ public class NotInExpression extends BinaryConditionalExpression {
             throw new FilterUnsupportedException(
                 "DataType: " + val.getDataType() + " not supported for the filter expression");
         }
+
         setOfExprResult.add(val);
       }
     }
+
     leftRsult.set(DataType.BOOLEAN, !setOfExprResult.contains(leftRsult));
     return leftRsult;
   }

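The new behaviour short-circuits NOT IN to false, and caches that outcome in nullValuePresent, as soon as either the left value or any value in the right-hand list is null, matching SQL three-valued logic where an unknown predicate filters the row out. A minimal hypothetical illustration of that rule; evaluateNotIn() is not the CarbonData implementation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hedged sketch of SQL-style NOT IN semantics in the presence of nulls.
public final class NotInNullSketch {

  static boolean evaluateNotIn(Integer left, List<Integer> rightList) {
    // NULL on either side makes the predicate unknown; a filter treats that as false
    if (left == null || rightList.contains(null)) {
      return false;
    }
    Set<Integer> values = new HashSet<Integer>(rightList);
    return !values.contains(left);
  }

  public static void main(String[] args) {
    System.out.println(evaluateNotIn(5, Arrays.asList(1, 2, 3)));     // true
    System.out.println(evaluateNotIn(2, Arrays.asList(1, 2, 3)));     // false
    System.out.println(evaluateNotIn(5, Arrays.asList(1, 2, null)));  // false: null in list
    System.out.println(evaluateNotIn(null, Arrays.asList(1, 2, 3)));  // false: null value
  }
}
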
http://git-wip-us.apache.org/repos/asf/carbondata/blob/211c23bb/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
index 1883607..5bdf315 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
@@ -203,11 +203,14 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
       start = CarbonUtil
           .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
               filterValues[i], false);
-      // Logic will handle the case where the range filter member is not present in block
-      // in this case the binary search will return the index from where the bit sets will be
-      // set inorder to apply filters. this is Lesser than filter so the range will be taken
-      // from the prev element which is Lesser than filter member.
-      start = CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[i]);
+      if (start >= 0) {
+        // Handles the case where the range filter member is not present in the block.
+        // In this case the binary search returns the index from which the bit sets will be
+        // set in order to apply the filter. Since this is a less-than filter, the range is
+        // taken from the previous element, which is lesser than the filter member.
+        start =
+            CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[i]);
+      }
       if (start < 0) {
         start = -(start + 1);
         if (start >= numerOfRows) {

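The added guard matters because getFirstIndexUsingBinarySearch can return a negative insertion point when the filter member is absent from the block; nextLesserValueToTarget is only meaningful for an actual hit, so it is now applied only when start >= 0. A generic, hedged sketch of decoding such a negative binary-search result; the values and filter below are illustrative and duplicates are ignored:

import java.util.Arrays;

// Hedged sketch of handling the negative insertion point returned by a binary search,
// analogous to the start >= 0 guard added above.
public final class BinarySearchGuardSketch {

  /** Returns the exclusive end index of rows strictly less than filterValue. */
  static int lessThanEndIndex(int[] sortedKeys, int filterValue) {
    int start = Arrays.binarySearch(sortedKeys, filterValue);
    if (start >= 0) {
      // exact hit: everything before the matched position is strictly lesser
      return start;
    }
    // miss: binarySearch returns -(insertionPoint) - 1, so recover the insertion point
    return -(start + 1);
  }

  public static void main(String[] args) {
    int[] sortedKeys = { 10, 20, 30, 40 };
    System.out.println(lessThanEndIndex(sortedKeys, 25)); // 2 -> rows 0..1 are < 25
    System.out.println(lessThanEndIndex(sortedKeys, 20)); // 1 -> row 0 is < 20
  }
}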

[21/42] carbondata git commit: Fixed all testcases of IUD in spark 2.1

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/IUDCompactionTestCases.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/IUDCompactionTestCases.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/IUDCompactionTestCases.scala
deleted file mode 100644
index 9da3913..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/IUDCompactionTestCases.scala
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.iud
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-
-class HorizontalCompactionTestCase extends QueryTest with BeforeAndAfterAll {
-  override def beforeAll {
-
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table iud4.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/comp1.csv' INTO table iud4.dest""")
-    sql(
-      """create table iud4.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table iud4.source2""")
-    sql("""create table iud4.other (c1 string,c2 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/other.csv' INTO table iud4.other""")
-    sql(
-      """create table iud4.hdest (c1 string,c2 int,c3 string,c5 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' STORED AS TEXTFILE""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/comp1.csv' INTO table iud4.hdest""")
-    sql(
-      """CREATE TABLE iud4.update_01(imei string,age int,task bigint,num double,level decimal(10,3),name string)STORED BY 'org.apache.carbondata.format' """)
-    sql(
-      s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud4.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled, "true")
-  }
-
-
-
-  test("test IUD Horizontal Compaction Update Alter Clean") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql(
-      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
-    sql(
-      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""")
-      .show()
-    sql(
-      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""")
-      .show()
-    sql(
-      """update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""")
-      .show()
-    sql("""alter table dest2 compact 'minor'""")
-    sql("""clean files for table dest2""")
-    checkAnswer(
-      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
-      Seq(Row("a", 1, "MGM", "Disco"),
-        Row("b", 2, "RGK", "Music"),
-        Row("c", 3, "cc", "ccc"),
-        Row("d", 4, "YDY", "Weather"),
-        Row("e", 5, "ee", "eee"),
-        Row("f", 6, "ff", "fff"),
-        Row("g", 7, "YTY", "Hello"),
-        Row("h", 8, "hh", "hhh"),
-        Row("i", 9, "ii", "iii"),
-        Row("j", 10, "jj", "jjj"),
-        Row("a", 11, "MGM", "Disco"),
-        Row("b", 12, "RGK", "Music"),
-        Row("c", 13, "cc", "ccc"),
-        Row("d", 14, "YDY", "Weather"),
-        Row("e", 15, "ee", "eee"),
-        Row("f", 16, "ff", "fff"),
-        Row("g", 17, "YTY", "Hello"),
-        Row("h", 18, "hh", "hhh"),
-        Row("i", 19, "ii", "iii"),
-        Row("j", 20, "jj", "jjj"),
-        Row("a", 21, "MGM", "Disco"),
-        Row("b", 22, "RGK", "Music"),
-        Row("c", 23, "cc", "ccc"),
-        Row("d", 24, "YDY", "Weather"),
-        Row("e", 25, "ee", "eee"),
-        Row("f", 26, "ff", "fff"),
-        Row("g", 27, "YTY", "Hello"),
-        Row("h", 28, "hh", "hhh"),
-        Row("i", 29, "ii", "iii"),
-        Row("j", 30, "jj", "jjj"),
-        Row("a", 31, "MGM", "Disco"),
-        Row("b", 32, "RGK", "Music"),
-        Row("c", 33, "cc", "ccc"),
-        Row("d", 34, "YDY", "Weather"),
-        Row("e", 35, "ee", "eee"),
-        Row("f", 36, "ff", "fff"),
-        Row("g", 37, "YTY", "Hello"),
-        Row("h", 38, "hh", "hhh"),
-        Row("i", 39, "ii", "iii"),
-        Row("j", 40, "jj", "jjj"))
-    )
-    sql("""drop table dest2""")
-  }
-
-
-  test("test IUD Horizontal Compaction Delete") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql("""select * from dest2""")
-    sql(
-      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
-    sql("""select * from source2""")
-    sql("""delete from dest2 where (c2 < 3) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
-    sql("""select * from dest2 order by 2""")
-    sql("""delete from dest2 where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""").show()
-    sql("""select * from dest2 order by 2""")
-    sql("""delete from dest2 where (c2 > 5 and c2 < 8) or (c2 > 15 and c2 < 18 ) or (c2 > 25 and c2 < 28) or (c2 > 35 and c2 < 38)""").show()
-    sql("""clean files for table dest2""")
-    checkAnswer(
-      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
-      Seq(Row("c", 3, "cc", "ccc"),
-        Row("e", 5, "ee", "eee"),
-        Row("h", 8, "hh", "hhh"),
-        Row("i", 9, "ii", "iii"),
-        Row("j", 10, "jj", "jjj"),
-        Row("c", 13, "cc", "ccc"),
-        Row("e", 15, "ee", "eee"),
-        Row("h", 18, "hh", "hhh"),
-        Row("i", 19, "ii", "iii"),
-        Row("j", 20, "jj", "jjj"),
-        Row("c", 23, "cc", "ccc"),
-        Row("e", 25, "ee", "eee"),
-        Row("h", 28, "hh", "hhh"),
-        Row("i", 29, "ii", "iii"),
-        Row("j", 30, "jj", "jjj"),
-        Row("c", 33, "cc", "ccc"),
-        Row("e", 35, "ee", "eee"),
-        Row("h", 38, "hh", "hhh"),
-        Row("i", 39, "ii", "iii"),
-        Row("j", 40, "jj", "jjj"))
-    )
-    sql("""drop table dest2""")
-  }
-
-  test("test IUD Horizontal Compaction Multiple Update Vertical Compaction and Clean") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql(
-      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""").show()
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and (s.c22 > 3 and s.c22 < 5) or (s.c22 > 13 and s.c22 < 15) or (s.c22 > 23 and s.c22 < 25) or (s.c22 > 33 and s.c22 < 35))""").show()
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""").show()
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c11,s.c66 from source2 s where d.c1 = s.c11 and (s.c22 > 5 and c22 < 8) or (s.c22 > 15 and s.c22 < 18 ) or (s.c22 > 25 and c22 < 28) or (s.c22 > 35 and c22 < 38))""").show()
-    sql("""alter table dest2 compact 'major'""")
-    sql("""clean files for table dest2""")
-    checkAnswer(
-      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
-      Seq(Row("a", 1, "a", "10"),
-        Row("b", 2, "b", "8"),
-        Row("c", 3, "cc", "ccc"),
-        Row("d", 4, "d", "9"),
-        Row("e", 5, "ee", "eee"),
-        Row("f", 6, "ff", "fff"),
-        Row("g", 7, "g", "12"),
-        Row("h", 8, "hh", "hhh"),
-        Row("i", 9, "ii", "iii"),
-        Row("j", 10, "jj", "jjj"),
-        Row("a", 11, "a", "10"),
-        Row("b", 12, "b", "8"),
-        Row("c", 13, "cc", "ccc"),
-        Row("d", 14, "d", "9"),
-        Row("e", 15, "ee", "eee"),
-        Row("f", 16, "ff", "fff"),
-        Row("g", 17, "g", "12"),
-        Row("h", 18, "hh", "hhh"),
-        Row("i", 19, "ii", "iii"),
-        Row("j", 20, "jj", "jjj"),
-        Row("a", 21, "a", "10"),
-        Row("b", 22, "b", "8"),
-        Row("c", 23, "cc", "ccc"),
-        Row("d", 24, "d", "9"),
-        Row("e", 25, "ee", "eee"),
-        Row("f", 26, "ff", "fff"),
-        Row("g", 27, "g", "12"),
-        Row("h", 28, "hh", "hhh"),
-        Row("i", 29, "ii", "iii"),
-        Row("j", 30, "jj", "jjj"),
-        Row("a", 31, "a", "10"),
-        Row("b", 32, "b", "8"),
-        Row("c", 33, "cc", "ccc"),
-        Row("d", 34, "d", "9"),
-        Row("e", 35, "ee", "eee"),
-        Row("f", 36, "ff", "fff"),
-        Row("g", 37, "g", "12"),
-        Row("h", 38, "hh", "hhh"),
-        Row("i", 39, "ii", "iii"),
-        Row("j", 40, "jj", "jjj"))
-    )
-    sql("""drop table dest2""")
-  }
-
-  test("test IUD Horizontal Compaction Update Delete and Clean") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql(
-      """create table source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source3.csv' INTO table source2""")
-    sql("""update dest2 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from source2 s where d.c1 = s.c11 and s.c22 < 3 or (s.c22 > 10 and s.c22 < 13) or (s.c22 > 20 and s.c22 < 23) or (s.c22 > 30 and s.c22 < 33))""").show()
-    sql("""delete from dest2 where (c2 < 2) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
-    sql("""delete from dest2 where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""").show()
-    sql("""delete from dest2 where (c2 > 5 and c2 < 8) or (c2 > 15 and c2 < 18 ) or (c2 > 25 and c2 < 28) or (c2 > 35 and c2 < 38)""").show()
-    sql("""clean files for table dest2""")
-    checkAnswer(
-      sql("""select c1,c2,c3,c5 from dest2 order by c2"""),
-      Seq(Row("b", 2, "RGK", "Music"),
-        Row("c", 3, "cc", "ccc"),
-        Row("e", 5, "ee", "eee"),
-        Row("h", 8, "hh", "hhh"),
-        Row("i", 9, "ii", "iii"),
-        Row("j", 10, "jj", "jjj"),
-        Row("c", 13, "cc", "ccc"),
-        Row("e", 15, "ee", "eee"),
-        Row("h", 18, "hh", "hhh"),
-        Row("i", 19, "ii", "iii"),
-        Row("j", 20, "jj", "jjj"),
-        Row("c", 23, "cc", "ccc"),
-        Row("e", 25, "ee", "eee"),
-        Row("h", 28, "hh", "hhh"),
-        Row("i", 29, "ii", "iii"),
-        Row("j", 30, "jj", "jjj"),
-        Row("c", 33, "cc", "ccc"),
-        Row("e", 35, "ee", "eee"),
-        Row("h", 38, "hh", "hhh"),
-        Row("i", 39, "ii", "iii"),
-        Row("j", 40, "jj", "jjj"))
-    )
-    sql("""drop table dest2""")
-  }
-
-  test("test IUD Horizontal Compaction Check Column Cardinality") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table T_Carbn01(Active_status String,Item_type_cd INT,Qty_day_avg INT,Qty_total INT,Sell_price BIGINT,Sell_pricep DOUBLE,Discount_price DOUBLE,Profit DECIMAL(3,2),Item_code String,Item_name String,Outlet_name String,Update_time TIMESTAMP,Create_date String)STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/T_Hive1.csv' INTO table t_carbn01 options ('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE','DELIMITER'=',', 'QUOTECHAR'='\', 'FILEHEADER'='Active_status,Item_type_cd,Qty_day_avg,Qty_total,Sell_price,Sell_pricep,Discount_price,Profit,Item_code,Item_name,Outlet_name,Update_time,Create_date')""")
-    sql("""update t_carbn01 set (item_code) = ('Orange') where item_type_cd = 14""").show()
-    sql("""update t_carbn01 set (item_code) = ('Banana') where item_type_cd = 2""").show()
-    sql("""delete from t_carbn01 where item_code in ('RE3423ee','Orange','Banana')""").show()
-    checkAnswer(
-      sql("""select item_code from t_carbn01 where item_code not in ('RE3423ee','Orange','Banana')"""),
-      Seq(Row("SAD423ee"),
-        Row("DE3423ee"),
-        Row("SE3423ee"),
-        Row("SE3423ee"),
-        Row("SE3423ee"),
-        Row("SE3423ee"))
-    )
-    sql("""drop table t_carbn01""")
-  }
-
-
-  test("test IUD Horizontal Compaction Segment Delete Test Case") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql(
-      """delete from dest2 where (c2 < 3) or (c2 > 10 and c2 < 13) or (c2 > 20 and c2 < 23) or (c2 > 30 and c2 < 33)""").show()
-    sql("""DELETE SEGMENT 0 FROM TABLE dest2""")
-    sql("""clean files for table dest2""")
-    sql(
-      """update dest2 set (c5) = ('8RAM size') where (c2 > 3 and c2 < 5) or (c2 > 13 and c2 < 15) or (c2 > 23 and c2 < 25) or (c2 > 33 and c2 < 35)""")
-      .show()
-    checkAnswer(
-      sql("""select count(*) from dest2"""),
-      Seq(Row(24))
-    )
-    sql("""drop table dest2""")
-  }
-
-  test("test case full table delete") {
-    sql("""drop database if exists iud4 cascade""")
-    sql("""create database iud4""")
-    sql("""use iud4""")
-    sql(
-      """create table dest2 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp1.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp2.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp3.csv' INTO table dest2""")
-    sql(s"""load data local inpath '$resourcesPath/IUD/comp4.csv' INTO table dest2""")
-    sql("""delete from dest2 where c2 < 41""").show()
-    sql("""alter table dest2 compact 'major'""")
-    checkAnswer(
-      sql("""select count(*) from dest2"""),
-      Seq(Row(0))
-    )
-    sql("""drop table dest2""")
-  }
-
-
-  override def afterAll {
-    sql("use default")
-    sql("drop database if exists iud4 cascade")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
deleted file mode 100644
index 2fc51b5..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.iud
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
-  override def beforeAll {
-
-    sql("drop database if exists iud cascade")
-    sql("create database iud")
-    sql("use iud")
-    sql("""create table iud.dest (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest""")
-    sql("""create table iud.source2 (c11 string,c22 int,c33 string,c55 string, c66 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/source2.csv' INTO table iud.source2""")
-    sql("""create table iud.other (c1 string,c2 int) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/other.csv' INTO table iud.other""")
-    sql("""create table iud.hdest (c1 string,c2 int,c3 string,c5 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' STORED AS TEXTFILE""").show()
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.hdest""")
-    sql("""CREATE TABLE iud.update_01(imei string,age int,task bigint,num double,level decimal(10,3),name string)STORED BY 'org.apache.carbondata.format' """)
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/update01.csv' INTO TABLE iud.update_01 OPTIONS('BAD_RECORDS_LOGGER_ENABLE' = 'FALSE', 'BAD_RECORDS_ACTION' = 'FORCE') """)
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
-  }
-
-
-  test("test update operation with 0 rows updation.") {
-    sql("""drop table iud.zerorows""").show
-    sql("""create table iud.zerorows (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.zerorows""")
-    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
-    sql("""update zerorows d  set (d.c2) = (d.c2 + 1) where d.c1 = 'xxx'""").show()
-     checkAnswer(
-      sql("""select c1,c2,c3,c5 from iud.zerorows"""),
-      Seq(Row("a",2,"aa","aaa"),Row("b",2,"bb","bbb"),Row("c",3,"cc","ccc"),Row("d",4,"dd","ddd"),Row("e",5,"ee","eee"))
-    )
-    sql("""drop table iud.zerorows""").show
-
-
-  }
-
-
-  test("update carbon table[select from source table with where and exist]") {
-      sql("""drop table iud.dest11""").show
-      sql("""create table iud.dest11 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest11""")
-      sql("""update iud.dest11 d set (d.c3, d.c5 ) = (select s.c33,s.c55 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-      checkAnswer(
-        sql("""select c3,c5 from iud.dest11"""),
-        Seq(Row("cc","ccc"), Row("dd","ddd"),Row("ee","eee"), Row("MGM","Disco"),Row("RGK","Music"))
-      )
-      sql("""drop table iud.dest11""").show
-   }
-
-   test("update carbon table[using destination table columns with where and exist]") {
-    sql("""drop table iud.dest22""")
-    sql("""create table iud.dest22 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest22""")
-    checkAnswer(
-      sql("""select c2 from iud.dest22 where c1='a'"""),
-      Seq(Row(1))
-    )
-    sql("""update dest22 d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
-    checkAnswer(
-      sql("""select c2 from iud.dest22 where c1='a'"""),
-      Seq(Row(2))
-    )
-    sql("""drop table iud.dest22""")
-   }
-
-   test("update carbon table without alias in set columns") {
-      sql("""drop table iud.dest33""")
-      sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-      sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-      checkAnswer(
-        sql("""select c3,c5 from iud.dest33 where c1='a'"""),
-        Seq(Row("MGM","Disco"))
-      )
-      sql("""drop table iud.dest33""")
-  }
-
-  test("update carbon table without alias in set columns with mulitple loads") {
-    sql("""drop table iud.dest33""")
-    sql("""create table iud.dest33 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest33""")
-    sql("""update iud.dest33 d set (c3,c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-    checkAnswer(
-      sql("""select c3,c5 from iud.dest33 where c1='a'"""),
-      Seq(Row("MGM","Disco"),Row("MGM","Disco"))
-    )
-    sql("""drop table iud.dest33""")
-  }
-
-   test("update carbon table without alias in set three columns") {
-     sql("""drop table iud.dest44""")
-     sql("""create table iud.dest44 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest44""")
-     sql("""update iud.dest44 d set (c1,c3,c5 ) = (select s.c11, s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
-     checkAnswer(
-       sql("""select c1,c3,c5 from iud.dest44 where c1='a'"""),
-       Seq(Row("a","MGM","Disco"))
-     )
-     sql("""drop table iud.dest44""")
-   }
-
-   test("update carbon table[single column select from source with where and exist]") {
-      sql("""drop table iud.dest55""")
-      sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-      sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
-     sql("""update iud.dest55 d set (c3)  = (select s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-      checkAnswer(
-        sql("""select c1,c3 from iud.dest55 """),
-        Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
-      )
-      sql("""drop table iud.dest55""")
-   }
-
-  test("update carbon table[single column SELECT from source with where and exist]") {
-    sql("""drop table iud.dest55""")
-    sql("""create table iud.dest55 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest55""")
-    sql("""update iud.dest55 d set (c3)  = (SELECT s.c33 from iud.source2 s where d.c1 = s.c11) where 1 = 1""").show()
-    checkAnswer(
-      sql("""select c1,c3 from iud.dest55 """),
-      Seq(Row("a","MGM"),Row("b","RGK"),Row("c","cc"),Row("d","dd"),Row("e","ee"))
-    )
-    sql("""drop table iud.dest55""")
-  }
-
-   test("update carbon table[using destination table columns without where clause]") {
-     sql("""drop table iud.dest66""")
-     sql("""create table iud.dest66 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest66""")
-     sql("""update iud.dest66 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest66 """),
-       Seq(Row(2,"aaaz"),Row(3,"bbbz"),Row(4,"cccz"),Row(5,"dddz"),Row(6,"eeez"))
-     )
-     sql("""drop table iud.dest66""")
-   }
-
-   test("update carbon table[using destination table columns with where clause]") {
-       sql("""drop table iud.dest77""")
-       sql("""create table iud.dest77 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-       sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest77""")
-       sql("""update iud.dest77 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z")) where d.c3 = 'dd'""").show()
-       checkAnswer(
-         sql("""select c2,c5 from iud.dest77 where c3 = 'dd'"""),
-         Seq(Row(5,"dddz"))
-       )
-       sql("""drop table iud.dest77""")
-   }
-
-   test("update carbon table[using destination table( no alias) columns without where clause]") {
-     sql("""drop table iud.dest88""")
-     sql("""create table iud.dest88 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest88""")
-     sql("""update iud.dest88  set (c2, c5 ) = (c2 + 1, concat(c5 , "y" ))""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest88 """),
-       Seq(Row(2,"aaay"),Row(3,"bbby"),Row(4,"cccy"),Row(5,"dddy"),Row(6,"eeey"))
-     )
-     sql("""drop table iud.dest88""")
-   }
-
-   test("update carbon table[using destination table columns with hard coded value ]") {
-     sql("""drop table iud.dest99""")
-     sql("""create table iud.dest99 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest99""")
-     sql("""update iud.dest99 d set (c2, c5 ) = (c2 + 1, "xyx")""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest99 """),
-       Seq(Row(2,"xyx"),Row(3,"xyx"),Row(4,"xyx"),Row(5,"xyx"),Row(6,"xyx"))
-     )
-     sql("""drop table iud.dest99""")
-   }
-
-   test("update carbon tableusing destination table columns with hard coded value and where condition]") {
-     sql("""drop table iud.dest110""")
-     sql("""create table iud.dest110 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest110""")
-     sql("""update iud.dest110 d set (c2, c5 ) = (c2 + 1, "xyx") where d.c1 = 'e'""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest110 where c1 = 'e' """),
-       Seq(Row(6,"xyx"))
-     )
-     sql("""drop table iud.dest110""")
-   }
-
-   test("update carbon table[using source  table columns with where and exist and no destination table condition]") {
-     sql("""drop table iud.dest120""")
-     sql("""create table iud.dest120 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest120""")
-     sql("""update iud.dest120 d  set (c3, c5 ) = (select s.c33 ,s.c55  from iud.source2 s where d.c1 = s.c11)""").show()
-     checkAnswer(
-       sql("""select c3,c5 from iud.dest120 """),
-       Seq(Row("MGM","Disco"),Row("RGK","Music"),Row("cc","ccc"),Row("dd","ddd"),Row("ee","eee"))
-     )
-     sql("""drop table iud.dest120""")
-   }
-
-   test("update carbon table[using destination table where and exist]") {
-     sql("""drop table iud.dest130""")
-     sql("""create table iud.dest130 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest130""")
-     sql("""update iud.dest130 dd  set (c2, c5 ) = (c2 + 1, "xyx")  where dd.c1 = 'a'""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest130 where c1 = 'a' """),
-       Seq(Row(2,"xyx"))
-     )
-     sql("""drop table iud.dest130""")
-   }
-
-   test("update carbon table[using destination table (concat) where and exist]") {
-     sql("""drop table iud.dest140""")
-     sql("""create table iud.dest140 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest140""")
-     sql("""update iud.dest140 d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"))  where d.c1 = 'a'""").show()
-     checkAnswer(
-       sql("""select c2,c5 from iud.dest140 where c1 = 'a'"""),
-       Seq(Row(2,"aaaz"))
-     )
-     sql("""drop table iud.dest140""")
-   }
-
-   test("update carbon table[using destination table (concat) with  where") {
-     sql("""drop table iud.dest150""")
-     sql("""create table iud.dest150 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest150""")
-     sql("""update iud.dest150 d set (c5) = (concat(c5 , "z"))  where d.c1 = 'b'""").show()
-     checkAnswer(
-       sql("""select c5 from iud.dest150 where c1 = 'b' """),
-       Seq(Row("bbbz"))
-     )
-     sql("""drop table iud.dest150""")
-   }
-
-  test("update table with data for datatype mismatch with column ") {
-    sql("""update iud.update_01 set (imei) = ('skt') where level = 'aaa'""")
-    checkAnswer(
-      sql("""select * from iud.update_01 where imei = 'skt'"""),
-      Seq()
-    )
-  }
-
-   test("update carbon table-error[more columns in source table not allowed") {
-     val exception = intercept[Exception] {
-       sql("""update iud.dest d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"), "abc")""").show()
-     }
-     assertResult("Number of source and destination columns are not matching")(exception.getMessage)
-   }
-
-   test("update carbon table-error[no set columns") {
-     intercept[Exception] {
-       sql("""update iud.dest d set () = ()""").show()
-     }
-   }
-
-   test("update carbon table-error[no set columns with updated column") {
-     intercept[Exception] {
-       sql("""update iud.dest d set  = (c1+1)""").show()
-     }
-   }
-   test("update carbon table-error[one set column with two updated column") {
-     intercept[Exception] {
-       sql("""update iud.dest  set c2 = (c2 + 1, concat(c5 , "z") )""").show()
-     }
-   }
-
- test("""update carbon [special characters  in value- test parsing logic ]""") {
-    sql("""drop table iud.dest160""")
-    sql("""create table iud.dest160 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest160""")
-    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
-    sql("""update iud.dest160 set(c1) =  ('abd$asjdh$adasj$l;sdf$*)$*)(&^')""").show()
-    sql("""update iud.dest160 set(c1) =("\\")""").show()
-    sql("""update iud.dest160 set(c1) = ("ab\')$*)(&^)")""").show()
-    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'a\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""update iud.dest160 d set (c3,c5)      =     (select s.c33,'a\\a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""update iud.dest160 d set (c3,c5) =(select s.c33,'a\'a\\' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""update iud.dest160 d set (c3,c5)=(select s.c33,'\\a\'a\"' from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    sql("""drop table iud.dest160""")
-  }
-
-  test("""update carbon [sub query, between and existing in outer condition.(Customer query ) ]""") {
-    sql("""drop table iud.dest170""")
-    sql("""create table iud.dest170 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest170""")
-    sql("""update iud.dest170 d set (c3)=(select s.c33 from iud.source2 s where d.c1 = s.c11 and d.c2 = s.c22) where  d.c2 between 1 and 3""").show()
-    checkAnswer(
-      sql("""select c3 from  iud.dest170 as d where d.c2 between 1 and 3"""),
-      Seq(Row("MGM"), Row("RGK"), Row("cc"))
-    )
-    sql("""drop table iud.dest170""")
-  }
-
-  test("""update carbon [self join select query ]""") {
-    sql("""drop table iud.dest171""")
-    sql("""create table iud.dest171 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest171""")
-    sql("""update iud.dest171 d set (c3)=(select concat(s.c3 , "z") from iud.dest171 s where d.c2 = s.c2)""").show
-    sql("""drop table iud.dest172""")
-    sql("""create table iud.dest172 (c1 string,c2 int,c3 string,c5 string) STORED BY 'org.apache.carbondata.format'""")
-    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table iud.dest172""")
-    sql("""update iud.dest172 d set (c3)=( concat(c3 , "z"))""").show
-    checkAnswer(
-      sql("""select c3 from  iud.dest171"""),
-      sql("""select c3 from  iud.dest172""")
-    )
-    sql("""drop table iud.dest171""")
-    sql("""drop table iud.dest172""")
-  }
-
-  test("update carbon table-error[closing bracket missed") {
-    intercept[Exception] {
-      sql("""update iud.dest d set (c2) = (194""").show()
-    }
-  }
-
-  test("update carbon table-error[starting bracket missed") {
-    intercept[Exception] {
-      sql("""update iud.dest d set (c2) = 194)""").show()
-    }
-  }
-
-  test("update carbon table-error[missing starting and closing bracket") {
-    intercept[Exception] {
-      sql("""update iud.dest d set (c2) = 194""").show()
-    }
-  }
-
-  test("test create table with column name as tupleID"){
-    intercept[Exception] {
-      sql("CREATE table carbontable (empno int, tupleID String, " +
-          "designation String, doj Timestamp, workgroupcategory int, " +
-          "workgroupcategoryname String, deptno int, deptname String, projectcode int, " +
-          "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
-          "utilization int,salary int) STORED BY 'org.apache.carbondata.format' " +
-          "TBLPROPERTIES('DICTIONARY_INCLUDE'='empno,workgroupcategory,deptno,projectcode'," +
-          "'DICTIONARY_EXCLUDE'='empname')")
-    }
-  }
-
-  test("Failure of update operation due to bad record with proper error message") {
-    try {
-      CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
-      val errorMessage = intercept[Exception] {
-        sql("drop table if exists update_with_bad_record")
-        sql("create table update_with_bad_record(item int, name String) stored by 'carbondata'")
-        sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/IUD/bad_record.csv' into table " +
-            s"update_with_bad_record")
-        sql("update update_with_bad_record set (item)=(3.45)").show()
-        sql("drop table if exists update_with_bad_record")
-      }
-      assert(errorMessage.getMessage.contains("Data load failed due to bad record"))
-    } finally {
-      CarbonProperties.getInstance()
-        .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FORCE")
-    }
-  }
-
-  override def afterAll {
-    sql("use default")
-    sql("drop database  if exists iud cascade")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.isHorizontalCompactionEnabled , "true")
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index e8627a1..bbdbe4f 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path
 import org.apache.hadoop.mapreduce.Job
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
 import org.apache.spark.{SparkEnv, SparkException}
-import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, UpdateCoalescedRDD}
+import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, NewHadoopRDD, RDD, UpdateCoalescedRDD}
 import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SQLContext}
 import org.apache.spark.sql.execution.command.{AlterTableModel, CompactionModel, ExecutionErrors, UpdateTableModel}
 import org.apache.spark.sql.hive.DistributionUtil
@@ -46,9 +46,9 @@ import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnarForma
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
-import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties}
 import org.apache.carbondata.core.util.path.CarbonStorePath
-import org.apache.carbondata.processing.csvload.BlockDetails
+import org.apache.carbondata.processing.csvload.{BlockDetails, CSVInputFormat, StringArrayWritable}
 import org.apache.carbondata.processing.etl.DataLoadingException
 import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
 import org.apache.carbondata.processing.model.CarbonLoadModel
@@ -56,7 +56,7 @@ import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingExcep
 import org.apache.carbondata.spark._
 import org.apache.carbondata.spark.load._
 import org.apache.carbondata.spark.splits.TableSplit
-import org.apache.carbondata.spark.util.{CarbonQueryUtil, CommonUtil}
+import org.apache.carbondata.spark.util.{CarbonQueryUtil, CarbonScalaUtil, CommonUtil}
 
 /**
  * This is the factory class which can create different RDD depends on user needs.
@@ -76,7 +76,8 @@ object CarbonDataRDDFactory {
     if (alterTableModel.compactionType.equalsIgnoreCase("major")) {
       compactionSize = CarbonDataMergerUtil.getCompactionSize(CompactionType.MAJOR_COMPACTION)
       compactionType = CompactionType.MAJOR_COMPACTION
-    } else if (alterTableModel.compactionType.equalsIgnoreCase("IUD_UPDDEL_DELTA_COMPACTION")) {
+    } else if (alterTableModel.compactionType
+      .equalsIgnoreCase(CompactionType.IUD_UPDDEL_DELTA_COMPACTION.toString)) {
       compactionType = CompactionType.IUD_UPDDEL_DELTA_COMPACTION
       if (alterTableModel.segmentUpdateStatusManager.get != None) {
         carbonLoadModel
@@ -653,6 +654,114 @@ object CarbonDataRDDFactory {
         }
       }
 
+      def loadDataFrameForUpdate(): Unit = {
+        def triggerDataLoadForSegment(key: String,
+            iter: Iterator[Row]): Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] = {
+          val rddResult = new updateResultImpl()
+          val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+          val resultIter = new Iterator[(String, (LoadMetadataDetails, ExecutionErrors))] {
+            var partitionID = "0"
+            val loadMetadataDetails = new LoadMetadataDetails
+            val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
+            var uniqueLoadStatusId = ""
+            try {
+              val segId = key
+              val taskNo = CarbonUpdateUtil
+                .getLatestTaskIdForSegment(segId,
+                  CarbonStorePath.getCarbonTablePath(carbonLoadModel.getStorePath,
+                    carbonTable.getCarbonTableIdentifier))
+              val index = taskNo + 1
+              uniqueLoadStatusId = carbonLoadModel.getTableName +
+                                   CarbonCommonConstants.UNDERSCORE +
+                                   (index + "_0")
+
+              // convert timestamp
+              val timeStampInLong = updateModel.get.updatedTimeStamp + ""
+              loadMetadataDetails.setPartitionCount(partitionID)
+              loadMetadataDetails.setLoadName(segId)
+              loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)
+              carbonLoadModel.setPartitionId(partitionID)
+              carbonLoadModel.setSegmentId(segId)
+              carbonLoadModel.setTaskNo(String.valueOf(index))
+              carbonLoadModel.setFactTimeStamp(updateModel.get.updatedTimeStamp)
+
+              // During Block Spill case Increment of File Count and proper adjustment of Block
+              // naming is only done when AbstractFactDataWriter.java : initializeWriter get
+              // CarbondataFileName as null. For handling Block Spill not setting the
+              // CarbondataFileName in case of Update.
+              // carbonLoadModel.setCarbondataFileName(newBlockName)
+
+              // storeLocation = CarbonDataLoadRDD.initialize(carbonLoadModel, index)
+              loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS)
+              val rddIteratorKey = CarbonCommonConstants.RDDUTIL_UPDATE_KEY +
+                                   UUID.randomUUID().toString
+              UpdateDataLoad.DataLoadForUpdate(segId,
+                index,
+                iter,
+                carbonLoadModel,
+                loadMetadataDetails)
+            } catch {
+              case e: Exception =>
+                LOGGER.info("DataLoad failure")
+                LOGGER.error(e)
+                throw e
+            }
+
+            var finished = false
+
+            override def hasNext: Boolean = !finished
+
+            override def next(): (String, (LoadMetadataDetails, ExecutionErrors)) = {
+              finished = true
+              rddResult
+                .getKey(uniqueLoadStatusId,
+                  (loadMetadataDetails, executionErrors))
+            }
+          }
+          resultIter
+        }
+
+        val updateRdd = dataFrame.get.rdd
+
+
+        val keyRDD = updateRdd.map(row =>
+          // splitting as (key, value) i.e., (segment, updatedRows)
+          (row.get(row.size - 1).toString, Row(row.toSeq.slice(0, row.size - 1): _*))
+        )
+        val groupBySegmentRdd = keyRDD.groupByKey()
+
+        val nodeNumOfData = groupBySegmentRdd.partitions.flatMap[String, Array[String]] { p =>
+          DataLoadPartitionCoalescer.getPreferredLocs(groupBySegmentRdd, p).map(_.host)
+        }.distinct.size
+        val nodes = DistributionUtil.ensureExecutorsByNumberAndGetNodeList(nodeNumOfData,
+          sqlContext.sparkContext)
+        val groupBySegmentAndNodeRdd =
+          new UpdateCoalescedRDD[(String, scala.Iterable[Row])](groupBySegmentRdd,
+            nodes.distinct.toArray)
+
+        res = groupBySegmentAndNodeRdd.map(x =>
+          triggerDataLoadForSegment(x._1, x._2.toIterator).toList
+        ).collect()
+
+      }
+
+      def loadDataForPartitionTable(): Unit = {
+        try {
+          val rdd = repartitionInputData(sqlContext, dataFrame, carbonLoadModel)
+          status = new PartitionTableDataLoaderRDD(sqlContext.sparkContext,
+            new DataLoadResultImpl(),
+            carbonLoadModel,
+            currentLoadCount,
+            tableCreationTime,
+            schemaLastUpdatedTime,
+            rdd).collect()
+        } catch {
+          case ex: Exception =>
+            LOGGER.error(ex, "load data failed for partition table")
+            throw ex
+        }
+      }
+
       if (!updateModel.isDefined) {
       CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
         carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName, currentLoadCount.toString)
@@ -661,10 +770,11 @@ object CarbonDataRDDFactory {
       var errorMessage: String = "DataLoad failure"
       var executorMessage: String = ""
       try {
-        if (dataFrame.isDefined) {
+        if (updateModel.isDefined) {
+          loadDataFrameForUpdate()
+        } else if (dataFrame.isDefined) {
           loadDataFrame()
-        }
-        else {
+        } else {
           loadDataFile()
         }
         if (updateModel.isDefined) {
@@ -743,15 +853,18 @@ object CarbonDataRDDFactory {
       // handle the status file updation for the update cmd.
       if (updateModel.isDefined) {
 
-      if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
-      // updateModel.get.executorErrors.errorMsg = errorMessage
+        if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
+          // updateModel.get.executorErrors.errorMsg = errorMessage
           if (updateModel.get.executorErrors.failureCauses == FailureCauses.NONE) {
             updateModel.get.executorErrors.failureCauses = FailureCauses.EXECUTOR_FAILURE
-            updateModel.get.executorErrors.errorMsg = "Update failed as the data load has failed."
+            if (null != executorMessage && !executorMessage.isEmpty) {
+              updateModel.get.executorErrors.errorMsg = executorMessage
+            } else {
+              updateModel.get.executorErrors.errorMsg = "Update failed as the data load has failed."
+            }
           }
           return
-        }
-        else {
+        } else {
           // in success case handle updation of the table status file.
           // success case.
           val segmentDetails = new util.HashSet[String]()
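
For context on the update path added above: loadDataFrameForUpdate re-keys the updated
DataFrame rows on the segment id that arrives appended as the last column, groups the
rows per segment, coalesces the groups onto the available executor nodes, and then runs
one UpdateDataLoad.DataLoadForUpdate call per segment. A minimal standalone sketch of
that group-by-segment step follows; the SparkSession setup, row values and column layout
are illustrative assumptions, not part of the patch.

  import org.apache.spark.sql.{Row, SparkSession}

  object GroupRowsBySegmentSketch {
    def main(args: Array[String]): Unit = {
      val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()
      // Updated rows with the target segment id appended as the last column,
      // mirroring the layout loadDataFrameForUpdate expects (values are made up).
      val updatedRows = spark.sparkContext.parallelize(Seq(
        Row("a", 2, "0"), Row("b", 3, "0"), Row("c", 4, "1")))
      val rowsBySegment = updatedRows
        .map(row => (row.get(row.size - 1).toString,
                     Row(row.toSeq.slice(0, row.size - 1): _*)))
        .groupByKey()
      // The real code hands each group to a per-segment data load; here we only
      // report how many updated rows each segment received.
      rowsBySegment.collect().foreach { case (segmentId, rows) =>
        println(s"segment $segmentId -> ${rows.size} updated rows")
      }
      spark.stop()
    }
  }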

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index 6651abe..0c3414a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -90,7 +90,7 @@ case class ShowLoadsCommand(databaseNameOp: Option[String], table: String, limit
 case class ProjectForUpdate(
     table: UnresolvedRelation,
     columns: List[String],
-    child: Seq[LogicalPlan] ) extends Command {
+    children: Seq[LogicalPlan] ) extends LogicalPlan {
   override def output: Seq[AttributeReference] = Seq.empty
 }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
index 39d03bb..01395ff 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/IUDCommands.scala
@@ -23,7 +23,7 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable.ListBuffer
 
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{CarbonDatasourceHadoopRelation, CarbonEnv, DataFrame, Dataset, Row, SparkSession, getDB}
+import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
 import org.apache.spark.sql.execution.datasources.LogicalRelation
@@ -566,8 +566,10 @@ object deleteExecution {
     CarbonUpdateUtil
       .createBlockDetailsMap(blockMappingVO, segmentUpdateStatusMngr)
 
-    val rowContRdd = sparkSession.sparkContext.parallelize(blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
-      keyRdd.partitions.size)
+    val rowContRdd =
+      sparkSession.sparkContext.parallelize(
+        blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
+          keyRdd.partitions.length)
 
 //    val rowContRdd = sqlContext.sparkContext
 //      .parallelize(blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
@@ -820,9 +822,9 @@ object UpdateExecution {
     }
     val ex = dataFrame.queryExecution.analyzed
     val res = ex find {
-      case relation: LogicalRelation if (relation.relation.isInstanceOf[CarbonDatasourceHadoopRelation] &&
-        isDestinationRelation(relation.relation
-          .asInstanceOf[CarbonDatasourceHadoopRelation])) =>
+      case relation: LogicalRelation
+        if relation.relation.isInstanceOf[CarbonDatasourceHadoopRelation] &&
+        isDestinationRelation(relation.relation.asInstanceOf[CarbonDatasourceHadoopRelation]) =>
         true
       case _ => false
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
index 7d94c92..0fb5c47 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
@@ -24,8 +24,8 @@ import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast, NamedE
 import org.apache.spark.sql.catalyst.plans.Inner
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
-import org.apache.spark.sql.execution.command.ProjectForDeleteCommand
 import org.apache.spark.sql.execution.{ProjectExec, SparkSqlParser, SubqueryExec}
+import org.apache.spark.sql.execution.command.ProjectForDeleteCommand
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -87,6 +87,8 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
      this.sparkSession = sparkSession
   }
 
+  private val parser = new SparkSqlParser(sparkSession.sessionState.conf)
+
   private def processUpdateQuery(
       table: UnresolvedRelation,
       columns: List[String],
@@ -102,12 +104,13 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
       val projList = Seq(
         UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq))), tupleId)
       // include tuple id and rest of the required columns in subqury
-      SubqueryAlias(table.alias.getOrElse(""), Project(projList, relation), Option(table.tableIdentifier))
+      SubqueryAlias(table.alias.getOrElse(""),
+        Project(projList, relation), Option(table.tableIdentifier))
     }
     // get the un-analyzed logical plan
     val targetTable = prepareTargetReleation(table)
-    val selectPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan(selectStmt) transform {
-      case Project(projectList, child) if (!includedDestColumns) =>
+    val selectPlan = parser.parsePlan(selectStmt) transform {
+      case Project(projectList, child) if !includedDestColumns =>
         includedDestColumns = true
         if (projectList.size != columns.size) {
           sys.error("Number of source and destination columns are not matching")
@@ -126,11 +129,10 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
         val list = Seq(
           UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq)))) ++ renamedProjectList
         Project(list, child)
-      case Filter(cond, child) if (!includedDestRelation) =>
+      case Filter(cond, child) if !includedDestRelation =>
         includedDestRelation = true
         Filter(cond, Join(child, targetTable, Inner, None))
-      case r @ UnresolvedRelation(t, a) if (!includedDestRelation &&
-                                            t != table.tableIdentifier) =>
+      case r @ UnresolvedRelation(t, a) if !includedDestRelation && t != table.tableIdentifier =>
         includedDestRelation = true
         Join(r, targetTable, Inner, None)
     }
@@ -138,8 +140,8 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
       // special case to handle self join queries
       // Eg. update tableName  SET (column1) = (column1+1)
       selectPlan transform {
-        case relation: UnresolvedRelation if (table.tableIdentifier == relation.tableIdentifier &&
-                                              addedTupleId == false) =>
+        case relation: UnresolvedRelation
+          if table.tableIdentifier == relation.tableIdentifier && !addedTupleId =>
           addedTupleId = true
           targetTable
       }
@@ -152,22 +154,17 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
       // Create a dummy projection to include filter conditions
       var newPlan: LogicalPlan = null
       if (table.tableIdentifier.database.isDefined) {
-        newPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan("select * from  " +
-                                                                     table.tableIdentifier.database
-                                                                       .getOrElse("") + "." +
-                                                                     table.tableIdentifier.table +
-                                                                     " " + alias + " " +
-                                                                     filter)
+        newPlan = parser.parsePlan("select * from  " +
+           table.tableIdentifier.database.getOrElse("") + "." +
+           table.tableIdentifier.table + " " + alias + " " + filter)
       }
       else {
-        newPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan("select * from  " +
-                                                                     table.tableIdentifier.table +
-                                                                     " " + alias + " " +
-                                                                     filter)
+        newPlan = parser.parsePlan("select * from  " +
+           table.tableIdentifier.table + " " + alias + " " + filter)
       }
       newPlan transform {
-        case UnresolvedRelation(t, Some(a)) if (
-          !transformed && t == table.tableIdentifier && a == alias) =>
+        case UnresolvedRelation(t, Some(a))
+          if !transformed && t == table.tableIdentifier && a == alias =>
           transformed = true
           // Add the filter condition of update statement  on destination table
           SubqueryAlias(alias, updatedSelectPlan, Option(table.tableIdentifier))
@@ -182,18 +179,22 @@ object CarbonIUDAnalysisRule extends Rule[LogicalPlan] {
   }
 
   def processDeleteRecordsQuery(selectStmt: String, table: UnresolvedRelation): LogicalPlan = {
-   // val tid = CarbonTableIdentifierImplicit.toTableIdentifier(Seq(table.tableIdentifier.toString()))
    val tidSeq = Seq(getDB.getDatabaseName(table.tableIdentifier.database, sparkSession),
      table.tableIdentifier.table)
     var addedTupleId = false
-    val selectPlan = new SparkSqlParser(sparkSession.sessionState.conf).parsePlan(selectStmt) transform {
-      case relation: UnresolvedRelation if (table.tableIdentifier == relation.tableIdentifier &&
-                                            addedTupleId == false) =>
+    val parsePlan = parser.parsePlan(selectStmt)
+    val selectPlan = parsePlan transform {
+      case relation: UnresolvedRelation
+        if table.tableIdentifier == relation.tableIdentifier && !addedTupleId =>
         addedTupleId = true
         val tupleId = UnresolvedAlias(Alias(UnresolvedFunction("getTupleId",
           Seq.empty, isDistinct = false), "tupleId")())
+        val alias = table.alias match {
+          case Some(alias) => Some(table.alias.toSeq)
+          case _ => None
+        }
         val projList = Seq(
-          UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq))), tupleId)
+          UnresolvedAlias(UnresolvedStar(alias)), tupleId)
         // include tuple id in subqury
         Project(projList, relation)
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index cc27181..7a6c513 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -122,17 +122,22 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
           case Project(pList, child) if (!isTransformed) =>
             val (dest: Seq[NamedExpression], source: Seq[NamedExpression]) = pList
               .splitAt(pList.size - cols.size)
-            val diff = cols.diff(dest.map(_.name))
+            val diff = cols.diff(dest.map(_.name.toLowerCase))
             if (diff.size > 0) {
               sys.error(s"Unknown column(s) ${diff.mkString(",")} in table ${table.tableName}")
             }
             isTransformed = true
-            Project(dest.filter(a => !cols.contains(a.name)) ++ source, child)
+            Project(dest.filter(a => !cols.contains(a.name.toLowerCase)) ++ source, child)
         }
-        ProjectForUpdateCommand(newPlan, Seq(table.tableIdentifier.toString()))
+        val identifier = table.tableIdentifier.database match {
+          case Some(db) => Seq(db, table.tableIdentifier.table)
+          case _ => Seq(table.tableIdentifier.table)
+        }
+        ProjectForUpdateCommand(newPlan, identifier)
     }
   }
 
+
   def isOptimized(plan: LogicalPlan): Boolean = {
     plan find {
       case cd: CarbonDictionaryCatalystDecoder => true

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 367aab4..bff1af3 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -125,7 +125,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           case Seq(dbName, tableName) => Some(tableName)
           case Seq(tableName) => Some(tableName)
         }
-        UnresolvedRelation(tableIdentifier, Option(tableAlias.toString))
+        UnresolvedRelation(tableIdentifier, tableAlias)
     }
   }
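
A brief note on the one-line parser change just above, based only on the surrounding
context (so treat it as a hedged reading): tableAlias already appears to be an
Option[String], and wrapping it as Option(tableAlias.toString) turns the alias into the
literal text of the Option rather than its value, while passing the Option through keeps
the actual alias (or none at all). A tiny REPL-style illustration, where tableAlias is a
stand-in value rather than the parser's variable:

  val tableAlias: Option[String] = Some("t")
  val before = Option(tableAlias.toString)   // Some("Some(t)") -- the alias becomes the Option's text
  val after  = tableAlias                    // Some("t")       -- the alias stays "t"
  println(s"$before vs $after")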
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d16d504/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java b/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
index cda907c..0145c2d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
@@ -520,7 +520,7 @@ public abstract class AbstractFactDataWriter<T> implements CarbonFactDataWriter<
           getMaxOfBlockAndFileSize(fileSizeInBytes, localCarbonFile.getSize()));
     } catch (IOException e) {
       throw new CarbonDataWriterException(
-          "Problem while copying file from local store to carbon store");
+          "Problem while copying file from local store to carbon store", e);
     }
     LOGGER.info(
         "Total copy time (ms) to copy file " + localFileName + " is " + (System.currentTimeMillis()


[16/42] carbondata git commit: Fixed full join query issue with aggregate

Posted by ra...@apache.org.
Fixed full join query issue with aggregate

Applied the same fix to the spark-1.6 integration module.

Fixed code style issues.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e67003cf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e67003cf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e67003cf

Branch: refs/heads/branch-1.1
Commit: e67003cf657e743194cf449792b67f896b1adc74
Parents: 0c6f5f3
Author: ravipesala <ra...@gmail.com>
Authored: Tue May 23 10:32:21 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:35 2017 +0530

----------------------------------------------------------------------
 .../joinquery/AllDataTypesTestCaseJoin.scala    |   9 +-
 .../spark/sql/optimizer/CarbonOptimizer.scala   | 101 ++++++++++++-------
 .../sql/optimizer/CarbonLateDecodeRule.scala    | 101 +++++++++++--------
 3 files changed, 131 insertions(+), 80 deletions(-)
----------------------------------------------------------------------
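
For a concrete picture of the issue, the query shape that previously failed is the one
added as a test further below: both sides of a FULL JOIN are aggregate subqueries over
the dictionary-encoded empno column. Roughly, reusing the test suite's sql(...) helper:

  // Both join children are GROUP BY subqueries; empno is in DICTIONARY_INCLUDE for the test table.
  sql("SELECT c1.empno, c1.empname, c2.empno " +
      "FROM (SELECT empno, empname FROM alldatatypestableJoin GROUP BY empno, empname) c1 " +
      "FULL JOIN (SELECT empno FROM alldatatypestableJoin GROUP BY empno) c2 " +
      "ON c1.empno = c2.empno")

The change extracts the aggregate handling into a transformAggregateExpression helper and,
when a join child is itself an Aggregate, also pushes the join-condition attributes into
that helper so they are decoded beneath the aggregate rather than only above it.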


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e67003cf/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
index be0f8e6..08fad0b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
@@ -28,7 +28,7 @@ import org.scalatest.BeforeAndAfterAll
 class AllDataTypesTestCaseJoin extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE alldatatypestableJoin (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql("CREATE TABLE alldatatypestableJoin (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_INCLUDE'='empno','TABLE_BLOCKSIZE'='4')")
     sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE alldatatypestableJoin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')""");
 
     sql("CREATE TABLE alldatatypestableJoin_hive (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int)row format delimited fields terminated by ','")
@@ -90,6 +90,13 @@ class AllDataTypesTestCaseJoin extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS carbon_table2")
   }
 
+  test("join with aggregate plan") {
+    checkAnswer(sql("SELECT c1.empno,c1.empname, c2.empno FROM (SELECT empno,empname FROM alldatatypestableJoin GROUP BY empno,empname) c1 FULL JOIN " +
+                    "(SELECT empno FROM alldatatypestableJoin GROUP BY empno) c2 ON c1.empno = c2.empno"),
+      sql("SELECT c1.empno,c1.empname, c2.empno FROM (SELECT empno,empname FROM alldatatypestableJoin_hive GROUP BY empno,empname) c1 FULL JOIN " +
+          "(SELECT empno FROM alldatatypestableJoin_hive GROUP BY empno) c2 ON c1.empno = c2.empno"))
+  }
+
   override def afterAll {
     sql("drop table alldatatypestableJoin")
     sql("drop table alldatatypestableJoin_hive")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e67003cf/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
index 9aa8158..02ac5f8 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
@@ -206,6 +206,47 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
     relations.foreach(_.fillAttributeMap(attrMap))
 
     def addTempDecoder(currentPlan: LogicalPlan): LogicalPlan = {
+
+      def transformAggregateExpression(agg: Aggregate,
+          aggonGroups: util.HashSet[AttributeReferenceWrapper] = null): LogicalPlan = {
+        val attrsOndimAggs = new util.HashSet[AttributeReferenceWrapper]
+        if (aggonGroups != null) {
+          attrsOndimAggs.addAll(aggonGroups)
+        }
+        agg.aggregateExpressions.map {
+          case attr: AttributeReference =>
+          case a@Alias(attr: AttributeReference, name) =>
+          case aggExp: AggregateExpression =>
+            aggExp.transform {
+              case aggExp: AggregateExpression =>
+                collectDimensionAggregates(aggExp, attrsOndimAggs, aliasMap, attrMap)
+                aggExp
+            }
+          case others =>
+            others.collect {
+              case attr: AttributeReference
+                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
+                attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
+            }
+        }
+        var child = agg.child
+        // Incase if the child also aggregate then push down decoder to child
+        if (attrsOndimAggs.size() > 0 && !child.equals(agg)) {
+          child = CarbonDictionaryTempDecoder(attrsOndimAggs,
+            new util.HashSet[AttributeReferenceWrapper](),
+            agg.child)
+        }
+        if (!decoder && aggonGroups == null) {
+          decoder = true
+          CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
+            new util.HashSet[AttributeReferenceWrapper](),
+            Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child),
+            isOuter = true)
+        } else {
+          Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child)
+        }
+      }
+
       currentPlan match {
         case limit@Limit(_, child: Sort) =>
           if (!decoder) {
@@ -288,39 +329,7 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
           }
 
         case agg: Aggregate if !agg.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
-          val attrsOndimAggs = new util.HashSet[AttributeReferenceWrapper]
-          agg.aggregateExpressions.map {
-            case attr: AttributeReference =>
-            case a@Alias(attr: AttributeReference, name) =>
-            case aggExp: AggregateExpression =>
-              aggExp.transform {
-                case aggExp: AggregateExpression =>
-                  collectDimensionAggregates(aggExp, attrsOndimAggs, aliasMap, attrMap)
-                  aggExp
-              }
-            case others =>
-              others.collect {
-                case attr: AttributeReference
-                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
-                  attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
-              }
-          }
-          var child = agg.child
-          // Incase if the child also aggregate then push down decoder to child
-          if (attrsOndimAggs.size() > 0 && !child.equals(agg)) {
-            child = CarbonDictionaryTempDecoder(attrsOndimAggs,
-              new util.HashSet[AttributeReferenceWrapper](),
-              agg.child)
-          }
-          if (!decoder) {
-            decoder = true
-            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
-              new util.HashSet[AttributeReferenceWrapper](),
-              Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child),
-              isOuter = true)
-          } else {
-            Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child)
-          }
+          transformAggregateExpression(agg)
         case expand: Expand if !expand.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
           val attrsOnExpand = new util.HashSet[AttributeReferenceWrapper]
           expand.projections.map {s =>
@@ -410,15 +419,29 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
             var rightPlan = j.right
             if (leftCondAttrs.size() > 0 &&
                 !leftPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
-              leftPlan = CarbonDictionaryTempDecoder(leftCondAttrs,
-                new util.HashSet[AttributeReferenceWrapper](),
-                j.left)
+              leftPlan = leftPlan match {
+                case agg: Aggregate =>
+                  CarbonDictionaryTempDecoder(leftCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    transformAggregateExpression(agg, leftCondAttrs))
+                case _ =>
+                  CarbonDictionaryTempDecoder(leftCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    j.left)
+              }
             }
             if (rightCondAttrs.size() > 0 &&
                 !rightPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
-              rightPlan = CarbonDictionaryTempDecoder(rightCondAttrs,
-                new util.HashSet[AttributeReferenceWrapper](),
-                j.right)
+              rightPlan = rightPlan match {
+                case agg: Aggregate =>
+                  CarbonDictionaryTempDecoder(rightCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    transformAggregateExpression(agg, rightCondAttrs))
+                case _ =>
+                  CarbonDictionaryTempDecoder(rightCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    j.right)
+              }
             }
             if (!decoder) {
               decoder = true

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e67003cf/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index fd6f14e..d1a0c90 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -178,6 +178,46 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
     relations.foreach(_.fillAttributeMap(attrMap))
 
     def addTempDecoder(currentPlan: LogicalPlan): LogicalPlan = {
+
+      def transformAggregateExpression(agg: Aggregate,
+          attrsOnGroup: util.HashSet[AttributeReferenceWrapper] = null): LogicalPlan = {
+        val attrsOndimAggs = new util.HashSet[AttributeReferenceWrapper]
+        if (attrsOnGroup != null) {
+          attrsOndimAggs.addAll(attrsOnGroup)
+        }
+        agg.aggregateExpressions.map {
+          case attr: AttributeReference =>
+          case a@Alias(attr: AttributeReference, name) =>
+          case aggExp: AggregateExpression =>
+            aggExp.transform {
+              case aggExp: AggregateExpression =>
+                collectDimensionAggregates(aggExp, attrsOndimAggs, aliasMap, attrMap)
+                aggExp
+            }
+          case others =>
+            others.collect {
+              case attr: AttributeReference
+                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
+                attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
+            }
+        }
+        var child = agg.child
+        // Incase if the child also aggregate then push down decoder to child
+        if (attrsOndimAggs.size() > 0 && !child.equals(agg)) {
+          child = CarbonDictionaryTempDecoder(attrsOndimAggs,
+            new util.HashSet[AttributeReferenceWrapper](),
+            agg.child)
+        }
+        if (!decoder && attrsOnGroup == null) {
+          decoder = true
+          CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
+            new util.HashSet[AttributeReferenceWrapper](),
+            Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child),
+            isOuter = true)
+        } else {
+          Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child)
+        }
+      }
       currentPlan match {
         case limit@GlobalLimit(_, LocalLimit(_, child: Sort)) =>
           if (!decoder) {
@@ -259,39 +299,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
             Union(children)
           }
         case agg: Aggregate if !agg.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
-          val attrsOndimAggs = new util.HashSet[AttributeReferenceWrapper]
-          agg.aggregateExpressions.map {
-            case attr: AttributeReference =>
-            case a@Alias(attr: AttributeReference, name) =>
-            case aggExp: AggregateExpression =>
-              aggExp.transform {
-                case aggExp: AggregateExpression =>
-                  collectDimensionAggregates(aggExp, attrsOndimAggs, aliasMap, attrMap)
-                  aggExp
-              }
-            case others =>
-              others.collect {
-                case attr: AttributeReference
-                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
-                  attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
-              }
-          }
-          var child = agg.child
-          // Incase if the child also aggregate then push down decoder to child
-          if (attrsOndimAggs.size() > 0 && !child.equals(agg)) {
-            child = CarbonDictionaryTempDecoder(attrsOndimAggs,
-              new util.HashSet[AttributeReferenceWrapper](),
-              agg.child)
-          }
-          if (!decoder) {
-            decoder = true
-            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
-              new util.HashSet[AttributeReferenceWrapper](),
-              Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child),
-              isOuter = true)
-          } else {
-            Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child)
-          }
+          transformAggregateExpression(agg)
         case expand: Expand if !expand.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
           val attrsOnExpand = new util.HashSet[AttributeReferenceWrapper]
           expand.projections.map {s =>
@@ -381,15 +389,29 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
             var rightPlan = j.right
             if (leftCondAttrs.size() > 0 &&
                 !leftPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
-              leftPlan = CarbonDictionaryTempDecoder(leftCondAttrs,
-                new util.HashSet[AttributeReferenceWrapper](),
-                j.left)
+              leftPlan = leftPlan match {
+                case agg: Aggregate =>
+                  CarbonDictionaryTempDecoder(leftCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    transformAggregateExpression(agg, leftCondAttrs))
+                case _ =>
+                  CarbonDictionaryTempDecoder(leftCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    j.left)
+              }
             }
             if (rightCondAttrs.size() > 0 &&
                 !rightPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
-              rightPlan = CarbonDictionaryTempDecoder(rightCondAttrs,
-                new util.HashSet[AttributeReferenceWrapper](),
-                j.right)
+              rightPlan = rightPlan match {
+                case agg: Aggregate =>
+                  CarbonDictionaryTempDecoder(rightCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    transformAggregateExpression(agg, rightCondAttrs))
+                case _ =>
+                  CarbonDictionaryTempDecoder(rightCondAttrs,
+                    new util.HashSet[AttributeReferenceWrapper](),
+                    j.right)
+              }
             }
             Join(leftPlan, rightPlan, j.joinType, j.condition)
           } else {
@@ -503,7 +525,6 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
 
         case others => others
       }
-
     }
 
     val transFormedPlan =


[42/42] carbondata git commit: Resolved compilation and test failures after merging from master.

Posted by ra...@apache.org.
Resolved compilation and test failures after merging from master.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/c05523d0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/c05523d0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/c05523d0

Branch: refs/heads/branch-1.1
Commit: c05523d0df68f618ca36b0ef4cca8bd92c4d0239
Parents: 02f06fd
Author: ravipesala <ra...@gmail.com>
Authored: Thu Jun 15 17:18:49 2017 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 17:18:49 2017 +0530

----------------------------------------------------------------------
 .../schema/table/column/CarbonDimension.java       |  2 +-
 .../executer/RangeValueFilterExecuterImpl.java     |  2 +-
 ...wLevelRangeLessThanEqualFilterExecuterImpl.java |  2 +-
 .../RowLevelRangeLessThanFiterExecuterImpl.java    |  2 +-
 .../AbstractDetailQueryResultIterator.java         |  4 +---
 .../testsuite/dataload/TestBatchSortDataLoad.scala |  6 +++---
 .../testsuite/dataload/TestLoadDataFrame.scala     |  4 ++--
 .../spark/rdd/CarbonDataRDDFactory.scala           |  3 +--
 .../spark/rdd/CarbonDataRDDFactory.scala           | 17 -----------------
 .../apache/spark/sql/common/util/QueryTest.scala   |  5 +++--
 pom.xml                                            |  4 ----
 .../processing/newflow/DataLoadProcessBuilder.java |  1 -
 .../newflow/sort/unsafe/UnsafeSortDataRows.java    | 13 +++++++++++++
 13 files changed, 27 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
index 8d02512..23f4d6c 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/CarbonDimension.java
@@ -125,7 +125,7 @@ public class CarbonDimension extends CarbonColumn {
    * @return is column participated in sorting or not
    */
   public boolean isSortColumn() {
-    return this.columnSchema.isSortColumn();
+    return !isComplex();
   }
 
   /**

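A brief note on the hunk above, with a minimal illustration (not part of the commit). The variable dim stands for any CarbonDimension taken from the loaded table schema; everything else comes from the hunk itself. After this change, branch-1.1 no longer consults the ColumnSchema flag and simply treats every non-complex dimension as a sort column:

    // Hypothetical sketch; 'dim' is assumed to be a CarbonDimension from the table schema.
    boolean sortCol = dim.isSortColumn();
    // On branch-1.1 after this patch, sortCol is true for every primitive
    // (non-complex) dimension and false only for ARRAY/STRUCT dimensions,
    // independent of ColumnSchema.isSortColumn().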
http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
index 6823531..12661d2 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RangeValueFilterExecuterImpl.java
@@ -556,7 +556,7 @@ public class RangeValueFilterExecuterImpl extends ValueBasedFilterExecuterImpl {
         CarbonDimension currentBlockDimension =
             segmentProperties.getDimensions().get(dimensionBlocksIndex);
         defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
-            this.segmentProperties.getSortColumnsGenerator());
+            this.segmentProperties.getDimensionKeyGenerator());
       } else {
         defaultValue = CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY;
       }

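The same substitution of getSortColumnsGenerator() with getDimensionKeyGenerator() is applied in RowLevelRangeLessThanEqualFilterExecuterImpl and RowLevelRangeLessThanFiterExecuterImpl below, presumably because branch-1.1 does not carry the sort-columns key generator from master. A hedged sketch of the resulting call site, using only names that appear in the hunks:

    // Default value for a dictionary dimension is masked with the segment's
    // full dimension key generator on branch-1.1.
    CarbonDimension currentBlockDimension =
        segmentProperties.getDimensions().get(dimensionBlocksIndex);
    byte[] defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
        segmentProperties.getDimensionKeyGenerator());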
http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
index d694960..eaf58a4 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
@@ -155,7 +155,7 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
       CarbonDimension currentBlockDimension =
           segmentProperties.getDimensions().get(dimensionBlocksIndex[0]);
       defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
-          this.segmentProperties.getSortColumnsGenerator());
+          this.segmentProperties.getDimensionKeyGenerator());
     }
     BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
index b3dd921..e9b6408 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
@@ -156,7 +156,7 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
       CarbonDimension currentBlockDimension =
           segmentProperties.getDimensions().get(dimensionBlocksIndex[0]);
       defaultValue = FilterUtil.getMaskKey(key, currentBlockDimension,
-          this.segmentProperties.getSortColumnsGenerator());
+          this.segmentProperties.getDimensionKeyGenerator());
     }
     BitSet bitSet = null;
     if (dimensionColumnDataChunk.isExplicitSorted()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
index 92e9594..4839cb5 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -115,9 +115,7 @@ public abstract class AbstractDetailQueryResultIterator<E> extends CarbonIterato
   private void intialiseInfos() {
     for (BlockExecutionInfo blockInfo : blockExecutionInfos) {
       Map<String, DeleteDeltaVo> deletedRowsMap = null;
-      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize(),
-          blockInfo.getDataBlock().getSegmentProperties().getNumberOfSortColumns(),
-          blockInfo.getDataBlock().getSegmentProperties().getNumberOfNoDictSortColumns());
+      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
       // if delete delta file is present
       if (null != blockInfo.getDeleteDeltaFilePath() && 0 != blockInfo
           .getDeleteDeltaFilePath().length) {

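For reference, a minimal sketch of the finder construction after this change; the constructor argument is taken from the hunk, while the lookup calls, dataBlock and the index keys are assumptions made for illustration only:

    // branch-1.1 builds the BTree finder from the per-column value sizes alone;
    // the sort-column counts used on master are not available here.
    DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
    // Assumed usage: locate the first and last data blocks of the scan range.
    DataRefNode firstBlock = finder.findFirstDataBlock(dataBlock, startIndexKey);
    DataRefNode lastBlock = finder.findLastDataBlock(dataBlock, endIndexKey);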
http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
index d53b5e5..af59cde 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
@@ -86,7 +86,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load1") == 5, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load1") == 6, "Something wrong in batch sort")
   }
 
   test("test batch sort load by passing option to load command and compare with normal load") {
@@ -167,7 +167,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load3") == 5, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load3") == 6, "Something wrong in batch sort")
 
     checkAnswer(sql("select * from carbon_load3 where c1='a1' order by c1"),
       sql("select * from carbon_load2 where c1='a1' order by c1"))
@@ -188,7 +188,7 @@ class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(100000)))
 
-    assert(getIndexfileCount("carbon_load4") == 5, "Something wrong in batch sort")
+    assert(getIndexfileCount("carbon_load4") == 6, "Something wrong in batch sort")
     CarbonProperties.getInstance().
       addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
         CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
index 9179c08..994acf6 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
@@ -171,7 +171,7 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
 
   test("test load dataframe with single pass enabled") {
     // save dataframe to carbon file
-    df.write
+    df2.write
       .format("carbondata")
       .option("tableName", "carbon8")
       .option("tempCSV", "false")
@@ -186,7 +186,7 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
 
   test("test load dataframe with single pass disabled") {
     // save dataframe to carbon file
-    df.write
+    df2.write
       .format("carbondata")
       .option("tableName", "carbon9")
       .option("tempCSV", "true")

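For context on the test change above, a minimal sketch of the single-pass DataFrame load these tests exercise; df2, the table name and the single_pass option value are assumptions of this illustration, not a quotation of the full test:

    // Hypothetical sketch of a single-pass load through the carbondata DataFrame writer.
    // df2 is assumed to be any DataFrame whose schema matches the target table.
    import org.apache.spark.sql.SaveMode

    df2.write
      .format("carbondata")
      .option("tableName", "carbon8")
      .option("tempCSV", "false")      // skip writing an intermediate CSV
      .option("single_pass", "true")   // assumed option key for dictionary generation during load
      .mode(SaveMode.Overwrite)
      .save()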
http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index dfea7d7..f282f69 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -48,9 +48,8 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonStorePath
-import org.apache.carbondata.processing.csvload.BlockDetails
 import org.apache.carbondata.processing.constants.LoggerAction
-import org.apache.carbondata.processing.csvload.{BlockDetails, CSVInputFormat, StringArrayWritable}
+import org.apache.carbondata.processing.csvload.BlockDetails
 import org.apache.carbondata.processing.etl.DataLoadingException
 import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
 import org.apache.carbondata.processing.model.CarbonLoadModel

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 96a8062..124036c 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -752,23 +752,6 @@ object CarbonDataRDDFactory {
 
       }
 
-      def loadDataForPartitionTable(): Unit = {
-        try {
-          val rdd = repartitionInputData(sqlContext, dataFrame, carbonLoadModel)
-          status = new PartitionTableDataLoaderRDD(sqlContext.sparkContext,
-            new DataLoadResultImpl(),
-            carbonLoadModel,
-            currentLoadCount,
-            tableCreationTime,
-            schemaLastUpdatedTime,
-            rdd).collect()
-        } catch {
-          case ex: Exception =>
-            LOGGER.error(ex, "load data failed for partition table")
-            throw ex
-        }
-      }
-
       if (!updateModel.isDefined) {
       CarbonLoaderUtil.checkAndCreateCarbonDataLocation(storePath,
         currentLoadCount.toString, carbonTable)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala b/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
index c37ea1e..be91df8 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
@@ -23,7 +23,7 @@ import scala.collection.JavaConversions._
 
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.util._
-import org.apache.spark.sql.hive.HiveExternalCatalog
+import org.apache.spark.sql.hive.{CarbonSessionState, HiveExternalCatalog}
 import org.apache.spark.sql.test.TestQueryExecutor
 import org.apache.spark.sql.{DataFrame, Row}
 
@@ -40,7 +40,8 @@ class QueryTest extends PlanTest {
 
   val sqlContext = TestQueryExecutor.INSTANCE.sqlContext
 
-  val hiveClient = sqlContext.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
+  val hiveClient = sqlContext.sparkSession.sessionState.asInstanceOf[CarbonSessionState]
+    .metadataHive
 
   val resourcesPath = TestQueryExecutor.resourcesPath
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f2c3aa7..3ee15bc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -128,10 +128,6 @@
       <id>pentaho-releases</id>
       <url>http://repository.pentaho.org/artifactory/repo/</url>
     </repository>
-    <repository>
-      <id>carbondata-releases</id>
-      <url>http://136.243.101.176:9091/repository/carbondata/</url>
-    </repository>
   </repositories>
 
   <dependencyManagement>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
index 5c7c035..a94abd3 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/DataLoadProcessBuilder.java
@@ -36,7 +36,6 @@ import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.processing.model.CarbonLoadModel;
 import org.apache.carbondata.processing.newflow.constants.DataLoadProcessorConstants;
 import org.apache.carbondata.processing.newflow.sort.SortScopeOptions;
-import org.apache.carbondata.processing.newflow.steps.CarbonRowDataWriterProcessorStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataConverterProcessorStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataConverterProcessorWithBucketingStepImpl;
 import org.apache.carbondata.processing.newflow.steps.DataWriterBatchProcessorStepImpl;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c05523d0/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
index b4daa51..8872dd4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/newflow/sort/unsafe/UnsafeSortDataRows.java
@@ -198,6 +198,19 @@ public class UnsafeSortDataRows {
 
   /**
    * This method will be used to add new row
+   *
+   * @param rowBatch new rowBatch
+   * @throws CarbonSortKeyAndGroupByException problem while writing
+   */
+  public void addRowBatchWithOutSync(Object[][] rowBatch, int size)
+      throws CarbonSortKeyAndGroupByException {
+    // if record holder list size is equal to sort buffer size then it will
+    // sort the list and then write current list data to file
+    addBatch(rowBatch, size);
+  }
+
+  /**
+   * This method will be used to add new row
    */
   public void addRow(Object[] row) throws CarbonSortKeyAndGroupByException {
     // if record holder list size is equal to sort buffer size then it will

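A minimal caller sketch for the new addRowBatchWithOutSync method; the method signature and checked exception are taken from the hunk above, while sortDataRows, iterator and batchSize are assumptions of this sketch:

    // Hypothetical single-threaded caller that already owns the rows and so can
    // skip the synchronised batch-add path.
    Object[][] rowBatch = new Object[batchSize][];
    int filled = 0;
    while (iterator.hasNext() && filled < batchSize) {
      rowBatch[filled++] = iterator.next();
    }
    try {
      // Hands the batch to the unsafe sorter; may sort and spill to file
      // once the in-memory buffer is full.
      sortDataRows.addRowBatchWithOutSync(rowBatch, filled);
    } catch (CarbonSortKeyAndGroupByException e) {
      throw new RuntimeException("unable to add row batch to sorter", e);
    }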

[14/42] carbondata git commit: Move TestComplexTypeQuery and fix complex-type column indexing

Posted by ra...@apache.org.
Move TestComplexTypeQuery to the spark-common-test module and fix the dictionary column index handling for complex dimensions in DictionaryBasedResultCollector.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a8b67261
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a8b67261
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a8b67261

Branch: refs/heads/branch-1.1
Commit: a8b672610ef845fffec5f6f9062e51ad040bcad4
Parents: 959e851
Author: jackylk <ja...@huawei.com>
Authored: Mon May 22 22:08:58 2017 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Jun 15 12:57:13 2017 +0530

----------------------------------------------------------------------
 .../impl/DictionaryBasedResultCollector.java    |   1 +
 .../complexType/TestComplexTypeQuery.scala      | 289 +++++++++++++++++++
 .../complexType/TestComplexTypeQuery.scala      | 289 -------------------
 3 files changed, 290 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a8b67261/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index e5e4b3c..b784f94 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -147,6 +147,7 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
       row[order[i]] = comlexDimensionInfoMap.get(queryDimensions[i].getDimension().getOrdinal())
           .getDataBasedOnDataTypeFromSurrogates(
               ByteBuffer.wrap(complexTypeKeyArray[complexTypeColumnIndex++]));
+      dictionaryColumnIndex++;
     } else {
       row[order[i]] = surrogateResult[actualIndexInSurrogateKey[dictionaryColumnIndex++]];
     }

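A short, hedged illustration of why the added dictionaryColumnIndex++ matters. The variable names follow the hunk; the guarding condition is paraphrased because the hunk shows only the branch bodies. One reading of the fix: complex dimensions also occupy a slot in the dictionary bookkeeping, so both counters must advance together or every later dictionary column reads the wrong surrogate value:

    if (queryDimensions[i].getDimension().isComplex()) {   // assumed guard
      row[order[i]] = comlexDimensionInfoMap.get(queryDimensions[i].getDimension().getOrdinal())
          .getDataBasedOnDataTypeFromSurrogates(
              ByteBuffer.wrap(complexTypeKeyArray[complexTypeColumnIndex++]));
      dictionaryColumnIndex++;   // the added line: keeps later dictionary columns aligned
    } else {
      row[order[i]] = surrogateResult[actualIndexInSurrogateKey[dictionaryColumnIndex++]];
    }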
http://git-wip-us.apache.org/repos/asf/carbondata/blob/a8b67261/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
new file mode 100644
index 0000000..c2c15eb
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
@@ -0,0 +1,289 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.complexType
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * Test class of creating and loading for carbon table with double
+ *
+ */
+class TestComplexTypeQuery extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll: Unit = {
+    sql("drop table if exists complexcarbontable")
+    sql("drop table if exists complexhivetable")
+    sql("drop table if exists complex_filter")
+    sql("drop table if exists structusingstructCarbon")
+    sql("drop table if exists structusingstructHive")
+    sql("drop table if exists structusingarraycarbon")
+    sql("drop table if exists structusingarrayhive")
+    sql(
+      "create table complexcarbontable(deviceInformationId int, channelsId string, ROMSize " +
+      "string, ROMName String, purchasedate string, mobile struct<imei:string, imsi:string>, MAC " +
+      "array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>, " +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double)  STORED BY 'org.apache.carbondata.format'  TBLPROPERTIES " +
+      "('DICTIONARY_INCLUDE'='deviceInformationId', 'DICTIONARY_EXCLUDE'='channelsId'," +
+      "'COLUMN_GROUP'='(ROMSize,ROMName)')")
+    sql("LOAD DATA local inpath '" + resourcesPath +
+        "/complextypesample.csv' INTO table complexcarbontable  OPTIONS('DELIMITER'=',', " +
+        "'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId,ROMSize,ROMName," +
+        "purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber', " +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')");
+    sql(
+      "create table complexhivetable(deviceInformationId int, channelsId string, ROMSize string, " +
+      "ROMName String, purchasedate string, mobile struct<imei:string, imsi:string>, MAC " +
+      "array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
+      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>, " +
+      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
+      "double,contractNumber double)row format delimited fields terminated by ',' collection " +
+      "items terminated by '$' map keys terminated by ':'")
+    sql(s"LOAD DATA local inpath '$resourcesPath/complextypesample.csv' INTO table " +
+        s"complexhivetable")
+    sql(
+      "create table complex_filter(test1 int, test2 array<String>,test3 array<bigint>,test4 " +
+      "array<int>,test5 array<decimal>,test6 array<timestamp>,test7 array<double>) STORED BY 'org" +
+      ".apache.carbondata.format'")
+    sql("LOAD DATA INPATH '" + resourcesPath +
+        "/array1.csv'  INTO TABLE complex_filter options ('DELIMITER'=',', 'QUOTECHAR'='\"', " +
+        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'FILEHEADER'= 'test1,test2,test3,test4,test5,test6," +
+        "test7')")
+      ()
+
+    sql(
+      "create table structusingarraycarbon (MAC struct<MAC1:array<string>," +
+      "ActiveCountry:array<string>>) STORED BY 'org.apache.carbondata.format'");
+    sql("LOAD DATA local INPATH '" + resourcesPath +
+        "/struct_all.csv' INTO table structusingarraycarbon options ('DELIMITER'=',', " +
+        "'QUOTECHAR'='\"', 'FILEHEADER'='MAC','COMPLEX_DELIMITER_LEVEL_1'='$'," +
+        "'COMPLEX_DELIMITER_LEVEL_2'='&')")
+    sql(
+      "create table structusingarrayhive (MAC struct<MAC1:array<string>," +
+      "ActiveCountry:array<string>>)row format delimited fields terminated by ',' collection " +
+      "items terminated by '$' map keys terminated by '&'");
+    sql("LOAD DATA local INPATH '" + resourcesPath +
+        "/struct_all.csv' INTO table structusingarrayhive")
+
+    sql(
+      "create table structusingstructCarbon(name struct<middlename:string, " +
+      "othernames:struct<firstname:string,lastname:string>,age:int> ) STORED BY 'org.apache" +
+      ".carbondata.format'")
+    sql("LOAD DATA local INPATH '" + resourcesPath +
+        "/structusingstruct.csv' INTO table structusingstructCarbon options ('DELIMITER'=',', " +
+        "'QUOTECHAR'='\"', 'FILEHEADER'='name','COMPLEX_DELIMITER_LEVEL_1'='$'," +
+        "'COMPLEX_DELIMITER_LEVEL_2'='&')")
+    sql(
+      "create table structusingstructhive(name struct<middlename:string, " +
+      "othernames:struct<firstname:string,lastname:string>,age:int> )row format delimited fields " +
+      "terminated by ',' collection items terminated by '$' map keys terminated by '&'")
+    sql("LOAD DATA local INPATH '" + resourcesPath +
+        "/structusingstruct.csv' INTO table structusingstructhive")
+
+  }
+
+  test("test for create table with complex type") {
+    try {
+      sql("drop table if exists carbon_table")
+      sql(
+        ("CREATE TABLE CARBON_TABLE(stringField string,complexData array<string>)stored by " +
+         "'CARBONDATA' ")
+          .stripMargin)
+      assert(true)
+    }
+    catch {
+      case exception: Exception => assert(false)
+    }
+  }
+
+  test(
+    "Test ^ * special character data loading for complex types") {
+    sql(
+      "create table complexcarbonwithspecialchardelimeter(deviceInformationId int, channelsId " +
+      "string, ROMSize string, ROMName String, purchasedate string, mobile struct<imei:string, " +
+      "imsi:string>, MAC array<string>, locationinfo array<struct<ActiveAreaId:int, " +
+      "ActiveCountry:string, ActiveProvince:string, Activecity:string, ActiveDistrict:string, " +
+      "ActiveStreet:string>>, proddate struct<productionDate:string," +
+      "activeDeactivedate:array<string>>, gamePointId double,contractNumber double)  STORED BY " +
+      "'org.apache.carbondata.format'  TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId'," +
+      " 'DICTIONARY_EXCLUDE'='channelsId','COLUMN_GROUP'='(ROMSize,ROMName)')");
+    sql("LOAD DATA local inpath '" + resourcesPath +
+        "/complextypespecialchardelimiter.csv' INTO table complexcarbonwithspecialchardelimeter  " +
+        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
+        "ROMSize,ROMName,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId," +
+        "contractNumber', 'COMPLEX_DELIMITER_LEVEL_1'='^', 'COMPLEX_DELIMITER_LEVEL_2'='*')");
+    sql(
+      "create table complexhivewithspecialchardelimeter(deviceInformationId int, channelsId " +
+      "string, ROMSize string, ROMName String, purchasedate string, mobile struct<imei:string, " +
+      "imsi:string>, MAC array<string>, locationinfo array<struct<ActiveAreaId:int, " +
+      "ActiveCountry:string, ActiveProvince:string, Activecity:string, ActiveDistrict:string, " +
+      "ActiveStreet:string>>, proddate struct<productionDate:string," +
+      "activeDeactivedate:array<string>>, gamePointId double,contractNumber double)row format " +
+      "delimited fields terminated by ',' collection items terminated by '^' map keys terminated " +
+      "by '*'")
+    sql("LOAD DATA local inpath '" + resourcesPath +
+        "/complextypespecialchardelimiter.csv' INTO table complexhivewithspecialchardelimeter");
+    checkAnswer(sql("select * from complexcarbonwithspecialchardelimeter"),
+      sql("select * from complexhivewithspecialchardelimeter"))
+    sql("drop table if exists complexcarbonwithspecialchardelimeter")
+    sql("drop table if exists complexhivewithspecialchardelimeter")
+  }
+
+  test("complex filter set1") {
+    checkAnswer(
+      sql("select test3[1] from complex_filter where test4[1] not like'%1%' order by test1"),
+      Seq(Row(5678), Row(1234))
+    )
+  }
+  test("complex filter set2") {
+    checkAnswer(
+      sql("select test2[0] from complex_filter  where  test3[0] like '%1234%'"),
+      Seq(Row("hello"))
+    )
+  }
+  test("select * from structusingarraycarbon") {
+    checkAnswer(sql("select * from structusingarraycarbon"),
+      sql("select * from structusingarrayhive"))
+  }
+
+  test("select * from structusingstructCarbon") {
+    checkAnswer(sql("select * from structusingstructCarbon"),
+      sql("select * from structusingstructhive"))
+  }
+
+  test("select * from complexcarbontable") {
+    checkAnswer(sql("select * from complexcarbontable"),
+      sql("select * from complexhivetable"))
+  }
+
+  test("select mobile, proddate, deviceInformationId  from complexcarbontable") {
+    checkAnswer(sql("select mobile, proddate, deviceInformationId  from complexcarbontable"),
+      sql("select mobile, proddate, deviceInformationId  from complexhivetable"))
+  }
+
+  test("select mobile, MAC, deviceInformationId, purchasedate from complexcarbontable") {
+    checkAnswer(sql("select mobile, MAC, deviceInformationId, purchasedate from " +
+                    "complexcarbontable"),
+      sql("select mobile, MAC, deviceInformationId, purchasedate from complexhivetable"))
+  }
+
+  test("select mobile, ROMSize, deviceInformationId from complexcarbontable") {
+    checkAnswer(sql("select mobile, ROMSize, deviceInformationId from complexcarbontable"),
+      sql("select mobile, ROMSize, deviceInformationId from complexhivetable"))
+  }
+
+  test("select locationinfo, purchasedate, deviceInformationId from complexcarbontable") {
+    checkAnswer(sql("select locationinfo, purchasedate, deviceInformationId from " +
+                    "complexcarbontable"),
+      sql("select locationinfo, purchasedate, deviceInformationId from complexhivetable"))
+  }
+  test("select locationinfo, ROMName, purchasedate, deviceinformationId from complexcarbontable") {
+    checkAnswer(sql(
+      "select locationinfo, ROMName, purchasedate, deviceinformationId from complexcarbontable"),
+      sql("select locationinfo, ROMName, purchasedate, deviceinformationId from complexhivetable"))
+  }
+  test("select MAC from complexcarbontable where MAC[0] = 'MAC1'") {
+    checkAnswer(sql("select MAC from complexcarbontable where MAC[0] = 'MAC1'"),
+      sql("select MAC from complexhivetable where MAC[0] = 'MAC1'"))
+  }
+  test("select mobile from complexcarbontable where mobile.imei like '1AA%'") {
+    checkAnswer(sql("select mobile from complexcarbontable where mobile.imei like '1AA%'"),
+      sql("select mobile from complexhivetable where mobile.imei like '1AA%'"))
+  }
+
+
+  test(
+    "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId > 2 AND " +
+    "locationinfo[0].ActiveAreaId < 7") {
+    checkAnswer(sql(
+      "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId > 2 AND " +
+      "locationinfo[0].ActiveAreaId < 7"),
+      sql(
+        "select locationinfo from complexhivetable where locationinfo[0].ActiveAreaId > 2 AND " +
+        "locationinfo[0].ActiveAreaId < 7"))
+  }
+  test(
+    "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId >= 2 AND " +
+    "locationinfo[0].ActiveAreaId <= 7") {
+    checkAnswer(sql(
+      "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId >= 2 AND " +
+      "locationinfo[0].ActiveAreaId <= 7"),
+      sql(
+        "select locationinfo from complexhivetable where locationinfo[0].ActiveAreaId >= 2 AND " +
+        "locationinfo[0].ActiveAreaId <= 7"))
+  }
+  test(
+    "select locationinfo from complexcarbontable where (locationinfo[0].ActiveAreaId +5 )> 6 AND " +
+    "(locationinfo[0].ActiveAreaId+10) < 20") {
+    checkAnswer(sql(
+      "select locationinfo from complexcarbontable where (locationinfo[0].ActiveAreaId +5 )> 6 " +
+      "AND (locationinfo[0].ActiveAreaId+10) < 20"),
+      sql(
+        "select locationinfo from complexhivetable where (locationinfo[0].ActiveAreaId +5 )> 6 " +
+        "AND (locationinfo[0].ActiveAreaId+10) < 20"))
+  }
+  test("select count(mobile),channelsId from complexcarbontable group by mobile,channelsId") {
+    checkAnswer(sql(
+      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId"),
+      sql("select count(mobile),channelsId from complexhivetable group by mobile,channelsId"))
+  }
+
+  test(
+    "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order by " +
+    "channelsId") {
+    checkAnswer(sql(
+      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order " +
+      "by channelsId"),
+      sql(
+        "select count(mobile),channelsId from complexhivetable group by mobile,channelsId order " +
+        "by channelsId"))
+  }
+  test(
+    "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order by " +
+    "channelsId limit 10") {
+    checkAnswer(sql(
+      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order " +
+      "by channelsId limit 10"),
+      sql(
+        "select count(mobile),channelsId from complexhivetable group by mobile,channelsId order " +
+        "by channelsId limit 10"))
+  }
+  test(
+    "select count(mobile),channelsId from complexcarbontable where MAC[0] = 'MAC1'  group by " +
+    "mobile,channelsId order by channelsId limit 10") {
+    checkAnswer(sql(
+      "select count(mobile),channelsId from complexcarbontable where MAC[0] = 'MAC1'  group by " +
+      "mobile,channelsId order by channelsId limit 10"),
+      sql(
+        "select count(mobile),channelsId from complexhivetable where MAC[0] = 'MAC1'  group by " +
+        "mobile,channelsId order by channelsId limit 10"))
+  }
+
+  override def afterAll {
+    sql("drop table if exists complexcarbontable")
+    sql("drop table if exists complexhivetable")
+    sql("drop table if exists structusingstructCarbon")
+    sql("drop table if exists structusingstructHive")
+    sql("drop table if exists structusingarraycarbon")
+    sql("drop table if exists structusingarrayhive")
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a8b67261/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
deleted file mode 100644
index c2c15eb..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.integration.spark.testsuite.complexType
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.common.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-/**
- * Test class of creating and loading for carbon table with double
- *
- */
-class TestComplexTypeQuery extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll: Unit = {
-    sql("drop table if exists complexcarbontable")
-    sql("drop table if exists complexhivetable")
-    sql("drop table if exists complex_filter")
-    sql("drop table if exists structusingstructCarbon")
-    sql("drop table if exists structusingstructHive")
-    sql("drop table if exists structusingarraycarbon")
-    sql("drop table if exists structusingarrayhive")
-    sql(
-      "create table complexcarbontable(deviceInformationId int, channelsId string, ROMSize " +
-      "string, ROMName String, purchasedate string, mobile struct<imei:string, imsi:string>, MAC " +
-      "array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
-      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>, " +
-      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
-      "double,contractNumber double)  STORED BY 'org.apache.carbondata.format'  TBLPROPERTIES " +
-      "('DICTIONARY_INCLUDE'='deviceInformationId', 'DICTIONARY_EXCLUDE'='channelsId'," +
-      "'COLUMN_GROUP'='(ROMSize,ROMName)')")
-    sql("LOAD DATA local inpath '" + resourcesPath +
-        "/complextypesample.csv' INTO table complexcarbontable  OPTIONS('DELIMITER'=',', " +
-        "'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId,ROMSize,ROMName," +
-        "purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber', " +
-        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')");
-    sql(
-      "create table complexhivetable(deviceInformationId int, channelsId string, ROMSize string, " +
-      "ROMName String, purchasedate string, mobile struct<imei:string, imsi:string>, MAC " +
-      "array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
-      "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>, " +
-      "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
-      "double,contractNumber double)row format delimited fields terminated by ',' collection " +
-      "items terminated by '$' map keys terminated by ':'")
-    sql(s"LOAD DATA local inpath '$resourcesPath/complextypesample.csv' INTO table " +
-        s"complexhivetable")
-    sql(
-      "create table complex_filter(test1 int, test2 array<String>,test3 array<bigint>,test4 " +
-      "array<int>,test5 array<decimal>,test6 array<timestamp>,test7 array<double>) STORED BY 'org" +
-      ".apache.carbondata.format'")
-    sql("LOAD DATA INPATH '" + resourcesPath +
-        "/array1.csv'  INTO TABLE complex_filter options ('DELIMITER'=',', 'QUOTECHAR'='\"', " +
-        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'FILEHEADER'= 'test1,test2,test3,test4,test5,test6," +
-        "test7')")
-      ()
-
-    sql(
-      "create table structusingarraycarbon (MAC struct<MAC1:array<string>," +
-      "ActiveCountry:array<string>>) STORED BY 'org.apache.carbondata.format'");
-    sql("LOAD DATA local INPATH '" + resourcesPath +
-        "/struct_all.csv' INTO table structusingarraycarbon options ('DELIMITER'=',', " +
-        "'QUOTECHAR'='\"', 'FILEHEADER'='MAC','COMPLEX_DELIMITER_LEVEL_1'='$'," +
-        "'COMPLEX_DELIMITER_LEVEL_2'='&')")
-    sql(
-      "create table structusingarrayhive (MAC struct<MAC1:array<string>," +
-      "ActiveCountry:array<string>>)row format delimited fields terminated by ',' collection " +
-      "items terminated by '$' map keys terminated by '&'");
-    sql("LOAD DATA local INPATH '" + resourcesPath +
-        "/struct_all.csv' INTO table structusingarrayhive")
-
-    sql(
-      "create table structusingstructCarbon(name struct<middlename:string, " +
-      "othernames:struct<firstname:string,lastname:string>,age:int> ) STORED BY 'org.apache" +
-      ".carbondata.format'")
-    sql("LOAD DATA local INPATH '" + resourcesPath +
-        "/structusingstruct.csv' INTO table structusingstructCarbon options ('DELIMITER'=',', " +
-        "'QUOTECHAR'='\"', 'FILEHEADER'='name','COMPLEX_DELIMITER_LEVEL_1'='$'," +
-        "'COMPLEX_DELIMITER_LEVEL_2'='&')")
-    sql(
-      "create table structusingstructhive(name struct<middlename:string, " +
-      "othernames:struct<firstname:string,lastname:string>,age:int> )row format delimited fields " +
-      "terminated by ',' collection items terminated by '$' map keys terminated by '&'")
-    sql("LOAD DATA local INPATH '" + resourcesPath +
-        "/structusingstruct.csv' INTO table structusingstructhive")
-
-  }
-
-  test("test for create table with complex type") {
-    try {
-      sql("drop table if exists carbon_table")
-      sql(
-        ("CREATE TABLE CARBON_TABLE(stringField string,complexData array<string>)stored by " +
-         "'CARBONDATA' ")
-          .stripMargin)
-      assert(true)
-    }
-    catch {
-      case exception: Exception => assert(false)
-    }
-  }
-
-  test(
-    "Test ^ * special character data loading for complex types") {
-    sql(
-      "create table complexcarbonwithspecialchardelimeter(deviceInformationId int, channelsId " +
-      "string, ROMSize string, ROMName String, purchasedate string, mobile struct<imei:string, " +
-      "imsi:string>, MAC array<string>, locationinfo array<struct<ActiveAreaId:int, " +
-      "ActiveCountry:string, ActiveProvince:string, Activecity:string, ActiveDistrict:string, " +
-      "ActiveStreet:string>>, proddate struct<productionDate:string," +
-      "activeDeactivedate:array<string>>, gamePointId double,contractNumber double)  STORED BY " +
-      "'org.apache.carbondata.format'  TBLPROPERTIES ('DICTIONARY_INCLUDE'='deviceInformationId'," +
-      " 'DICTIONARY_EXCLUDE'='channelsId','COLUMN_GROUP'='(ROMSize,ROMName)')");
-    sql("LOAD DATA local inpath '" + resourcesPath +
-        "/complextypespecialchardelimiter.csv' INTO table complexcarbonwithspecialchardelimeter  " +
-        "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
-        "ROMSize,ROMName,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId," +
-        "contractNumber', 'COMPLEX_DELIMITER_LEVEL_1'='^', 'COMPLEX_DELIMITER_LEVEL_2'='*')");
-    sql(
-      "create table complexhivewithspecialchardelimeter(deviceInformationId int, channelsId " +
-      "string, ROMSize string, ROMName String, purchasedate string, mobile struct<imei:string, " +
-      "imsi:string>, MAC array<string>, locationinfo array<struct<ActiveAreaId:int, " +
-      "ActiveCountry:string, ActiveProvince:string, Activecity:string, ActiveDistrict:string, " +
-      "ActiveStreet:string>>, proddate struct<productionDate:string," +
-      "activeDeactivedate:array<string>>, gamePointId double,contractNumber double)row format " +
-      "delimited fields terminated by ',' collection items terminated by '^' map keys terminated " +
-      "by '*'")
-    sql("LOAD DATA local inpath '" + resourcesPath +
-        "/complextypespecialchardelimiter.csv' INTO table complexhivewithspecialchardelimeter");
-    checkAnswer(sql("select * from complexcarbonwithspecialchardelimeter"),
-      sql("select * from complexhivewithspecialchardelimeter"))
-    sql("drop table if exists complexcarbonwithspecialchardelimeter")
-    sql("drop table if exists complexhivewithspecialchardelimeter")
-  }
-
-  test("complex filter set1") {
-    checkAnswer(
-      sql("select test3[1] from complex_filter where test4[1] not like'%1%' order by test1"),
-      Seq(Row(5678), Row(1234))
-    )
-  }
-  test("complex filter set2") {
-    checkAnswer(
-      sql("select test2[0] from complex_filter  where  test3[0] like '%1234%'"),
-      Seq(Row("hello"))
-    )
-  }
-  test("select * from structusingarraycarbon") {
-    checkAnswer(sql("select * from structusingarraycarbon"),
-      sql("select * from structusingarrayhive"))
-  }
-
-  test("select * from structusingstructCarbon") {
-    checkAnswer(sql("select * from structusingstructCarbon"),
-      sql("select * from structusingstructhive"))
-  }
-
-  test("select * from complexcarbontable") {
-    checkAnswer(sql("select * from complexcarbontable"),
-      sql("select * from complexhivetable"))
-  }
-
-  test("select mobile, proddate, deviceInformationId  from complexcarbontable") {
-    checkAnswer(sql("select mobile, proddate, deviceInformationId  from complexcarbontable"),
-      sql("select mobile, proddate, deviceInformationId  from complexhivetable"))
-  }
-
-  test("select mobile, MAC, deviceInformationId, purchasedate from complexcarbontable") {
-    checkAnswer(sql("select mobile, MAC, deviceInformationId, purchasedate from " +
-                    "complexcarbontable"),
-      sql("select mobile, MAC, deviceInformationId, purchasedate from complexhivetable"))
-  }
-
-  test("select mobile, ROMSize, deviceInformationId from complexcarbontable") {
-    checkAnswer(sql("select mobile, ROMSize, deviceInformationId from complexcarbontable"),
-      sql("select mobile, ROMSize, deviceInformationId from complexhivetable"))
-  }
-
-  test("select locationinfo, purchasedate, deviceInformationId from complexcarbontable") {
-    checkAnswer(sql("select locationinfo, purchasedate, deviceInformationId from " +
-                    "complexcarbontable"),
-      sql("select locationinfo, purchasedate, deviceInformationId from complexhivetable"))
-  }
-  test("select locationinfo, ROMName, purchasedate, deviceinformationId from complexcarbontable") {
-    checkAnswer(sql(
-      "select locationinfo, ROMName, purchasedate, deviceinformationId from complexcarbontable"),
-      sql("select locationinfo, ROMName, purchasedate, deviceinformationId from complexhivetable"))
-  }
-  test("select MAC from complexcarbontable where MAC[0] = 'MAC1'") {
-    checkAnswer(sql("select MAC from complexcarbontable where MAC[0] = 'MAC1'"),
-      sql("select MAC from complexhivetable where MAC[0] = 'MAC1'"))
-  }
-  test("select mobile from complexcarbontable where mobile.imei like '1AA%'") {
-    checkAnswer(sql("select mobile from complexcarbontable where mobile.imei like '1AA%'"),
-      sql("select mobile from complexhivetable where mobile.imei like '1AA%'"))
-  }
-
-
-  test(
-    "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId > 2 AND " +
-    "locationinfo[0].ActiveAreaId < 7") {
-    checkAnswer(sql(
-      "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId > 2 AND " +
-      "locationinfo[0].ActiveAreaId < 7"),
-      sql(
-        "select locationinfo from complexhivetable where locationinfo[0].ActiveAreaId > 2 AND " +
-        "locationinfo[0].ActiveAreaId < 7"))
-  }
-  test(
-    "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId >= 2 AND " +
-    "locationinfo[0].ActiveAreaId <= 7") {
-    checkAnswer(sql(
-      "select locationinfo from complexcarbontable where locationinfo[0].ActiveAreaId >= 2 AND " +
-      "locationinfo[0].ActiveAreaId <= 7"),
-      sql(
-        "select locationinfo from complexhivetable where locationinfo[0].ActiveAreaId >= 2 AND " +
-        "locationinfo[0].ActiveAreaId <= 7"))
-  }
-  test(
-    "select locationinfo from complexcarbontable where (locationinfo[0].ActiveAreaId +5 )> 6 AND " +
-    "(locationinfo[0].ActiveAreaId+10) < 20") {
-    checkAnswer(sql(
-      "select locationinfo from complexcarbontable where (locationinfo[0].ActiveAreaId +5 )> 6 " +
-      "AND (locationinfo[0].ActiveAreaId+10) < 20"),
-      sql(
-        "select locationinfo from complexhivetable where (locationinfo[0].ActiveAreaId +5 )> 6 " +
-        "AND (locationinfo[0].ActiveAreaId+10) < 20"))
-  }
-  test("select count(mobile),channelsId from complexcarbontable group by mobile,channelsId") {
-    checkAnswer(sql(
-      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId"),
-      sql("select count(mobile),channelsId from complexhivetable group by mobile,channelsId"))
-  }
-
-  test(
-    "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order by " +
-    "channelsId") {
-    checkAnswer(sql(
-      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order " +
-      "by channelsId"),
-      sql(
-        "select count(mobile),channelsId from complexhivetable group by mobile,channelsId order " +
-        "by channelsId"))
-  }
-  test(
-    "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order by " +
-    "channelsId limit 10") {
-    checkAnswer(sql(
-      "select count(mobile),channelsId from complexcarbontable group by mobile,channelsId order " +
-      "by channelsId limit 10"),
-      sql(
-        "select count(mobile),channelsId from complexhivetable group by mobile,channelsId order " +
-        "by channelsId limit 10"))
-  }
-  test(
-    "select count(mobile),channelsId from complexcarbontable where MAC[0] = 'MAC1'  group by " +
-    "mobile,channelsId order by channelsId limit 10") {
-    checkAnswer(sql(
-      "select count(mobile),channelsId from complexcarbontable where MAC[0] = 'MAC1'  group by " +
-      "mobile,channelsId order by channelsId limit 10"),
-      sql(
-        "select count(mobile),channelsId from complexhivetable where MAC[0] = 'MAC1'  group by " +
-        "mobile,channelsId order by channelsId limit 10"))
-  }
-
-  override def afterAll {
-    sql("drop table if exists complexcarbontable")
-    sql("drop table if exists complexhivetable")
-    sql("drop table if exists structusingstructCarbon")
-    sql("drop table if exists structusingstructHive")
-    sql("drop table if exists structusingarraycarbon")
-    sql("drop table if exists structusingarrayhive")
-
-  }
-}