Posted to commits@carbondata.apache.org by ku...@apache.org on 2019/05/16 09:42:25 UTC

[carbondata] branch master updated: [CARBONDATA-3374] Optimize documentation and fix some spell errors.

This is an automated email from the ASF dual-hosted git repository.

kunalkapoor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new ed17abb  [CARBONDATA-3374] Optimize documentation and fix some spell errors.
ed17abb is described below

commit ed17abbff1fa08d60085979a6c42b7b569dae141
Author: xubo245 <xu...@huawei.com>
AuthorDate: Tue May 7 20:47:13 2019 +0800

    [CARBONDATA-3374] Optimize documentation and fix some spell errors.
    
    Optimize documentation and fix some spell errors.
    
    This closes #3207
---
 .../apache/carbondata/core/datamap/dev/DataMapFactory.java   |  4 ++--
 .../carbondata/core/indexstore/BlockletDetailsFetcher.java   |  4 ++--
 .../indexstore/blockletindex/BlockletDataMapFactory.java     |  2 +-
 .../carbondata/core/indexstore/schema/SchemaGenerator.java   |  2 +-
 .../apache/carbondata/core/util/path/CarbonTablePath.java    |  2 +-
 .../carbondata/datamap/lucene/LuceneDataMapFactoryBase.java  |  2 +-
 .../datamap/lucene/LuceneFineGrainDataMapFactory.java        |  2 +-
 docs/carbon-as-spark-datasource-guide.md                     |  2 +-
 docs/ddl-of-carbondata.md                                    | 12 +++++++-----
 .../spark/testsuite/dataload/TestLoadDataGeneral.scala       |  4 ++--
 .../spark/testsuite/datamap/CGDataMapTestCase.scala          |  4 ++--
 .../spark/testsuite/datamap/DataMapWriterSuite.scala         |  2 +-
 .../spark/testsuite/datamap/FGDataMapTestCase.scala          |  4 ++--
 .../apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala   |  2 +-
 .../org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala   |  6 +++---
 .../execution/datasources/SparkCarbonFileFormat.scala        |  3 ++-
 .../scala/org/apache/spark/sql/CarbonCatalystOperators.scala |  4 ++--
 .../execution/command/management/CarbonLoadDataCommand.scala |  2 +-
 .../scala/org/apache/spark/sql/optimizer/CarbonFilters.scala |  2 +-
 19 files changed, 34 insertions(+), 31 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index ee7914d..b32a482 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -88,7 +88,7 @@ public abstract class DataMapFactory<T extends DataMap> {
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   public abstract List<T> getDataMaps(Segment segment) throws IOException;
 
@@ -99,7 +99,7 @@ public abstract class DataMapFactory<T extends DataMap> {
       throws IOException;
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    * @return
    */
   public abstract List<DataMapDistributable> toDistributable(Segment segment);
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
index 1971f40..ae01e9e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.datamap.Segment;
 public interface BlockletDetailsFetcher {
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid.
+   * Get the blocklet detail information based on blockletid, blockid and segmentId.
    *
    * @param blocklets
    * @param segment
@@ -38,7 +38,7 @@ public interface BlockletDetailsFetcher {
       throws IOException;
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid.
+   * Get the blocklet detail information based on blockletid, blockid and segmentId.
    *
    * @param blocklet
    * @param segment
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index 2ef7b88..93be06e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -185,7 +185,7 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
   }
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid. This method is
+   * Get the blocklet detail information based on blockletid, blockid and segmentId. This method is
    * exclusively for BlockletDataMapFactory as detail information is only available in this
    * default datamap.
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
index 41c382b..288e062 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
@@ -189,7 +189,7 @@ public class SchemaGenerator {
     // for storing file name
     taskMinMaxSchemas
         .add(new CarbonRowSchema.VariableCarbonRowSchema(DataTypes.BYTE_ARRAY));
-    // for storing segmentid
+    // for storing segmentId
     taskMinMaxSchemas
         .add(new CarbonRowSchema.VariableCarbonRowSchema(DataTypes.BYTE_ARRAY));
     // for storing min max flag for each column which reflects whether min max for a column is
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
index da558be..422a6dc 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
@@ -291,7 +291,7 @@ public class CarbonTablePath {
   }
 
   /**
-   * Return the segment path from table path and segmentid
+   * Return the segment path from table path and segmentId
    */
   public static String getSegmentPath(String tablePath, String segmentId) {
     return getPartitionDir(tablePath) + File.separator + SEGMENT_PREFIX + segmentId;
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
index 68c3bcc..3ae390d 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
@@ -209,7 +209,7 @@ abstract class LuceneDataMapFactoryBase<T extends DataMap> extends DataMapFactor
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    */
   @Override
   public List<DataMapDistributable> toDistributable(Segment segment) {
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
index a3c4063..7ee843b 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
@@ -47,7 +47,7 @@ public class LuceneFineGrainDataMapFactory extends LuceneDataMapFactoryBase<Fine
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   @Override public List<FineGrainDataMap> getDataMaps(Segment segment) throws IOException {
     List<FineGrainDataMap> lstDataMap = new ArrayList<>();
diff --git a/docs/carbon-as-spark-datasource-guide.md b/docs/carbon-as-spark-datasource-guide.md
index fe46b09..66338f1 100644
--- a/docs/carbon-as-spark-datasource-guide.md
+++ b/docs/carbon-as-spark-datasource-guide.md
@@ -19,7 +19,7 @@
 
 The CarbonData fileformat is now integrated as Spark datasource for read and write operation without using CarbonSession. This is useful for users who wants to use carbondata as spark's data source. 
 
-**Note:** You can only apply the functions/features supported by spark datasource APIs, functionalities supported would be similar to Parquet. The carbon session features are not supported.
+**Note:** You can only apply the functions/features supported by spark datasource APIs, functionalities supported would be similar to Parquet. The carbon session features are not supported. The result is displayed in byte array format when a select query is run on a binary column in spark-sql.
 
 # Create Table with DDL
 
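An illustrative aside (not part of the commit): a minimal sketch of the datasource usage the note above describes. The format name "carbondata" and the output path are assumptions; the point is that no CarbonSession is involved, and a binary column read this way comes back as a byte array.

```scala
// Minimal sketch, assuming the "carbondata" datasource is on the classpath.
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("carbon-datasource-sketch")
  .getOrCreate()

// Write a DataFrame as carbondata files without creating a CarbonSession.
spark.range(10).toDF("id")
  .write.format("carbondata").mode("overwrite").save("/tmp/carbon_example")

// Read the files back through the same datasource; binary columns, if any,
// are returned as Array[Byte].
val df = spark.read.format("carbondata").load("/tmp/carbon_example")
df.show()
```
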
diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 34eca8d..2d43645 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -157,6 +157,7 @@ CarbonData DDL statements are documented here,which includes:
       * BOOLEAN
       * FLOAT
       * BYTE
+      * Binary
    * In case of multi-level complex dataType columns, primitive string/varchar/char columns are considered for local dictionary generation.
 
    System Level Properties for Local Dictionary: 
@@ -224,7 +225,7 @@ CarbonData DDL statements are documented here,which includes:
    - ##### Sort Columns Configuration
 
      This property is for users to specify which columns belong to the MDK(Multi-Dimensions-Key) index.
-     * If users don't specify "SORT_COLUMN" property, by default no columns are sorted 
+     * If users don't specify "SORT_COLUMNS" property, by default no columns are sorted 
      * If this property is specified but with empty argument, then the table will be loaded without sort.
      * This supports only string, date, timestamp, short, int, long, byte and boolean data types.
      Suggested use cases : Only build MDK index for required columns,it might help to improve the data loading performance.
@@ -233,7 +234,7 @@ CarbonData DDL statements are documented here,which includes:
      TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
      ```
 
-     **NOTE**: Sort_Columns for Complex datatype columns is not supported.
+     **NOTE**: Sort_Columns is not supported for complex datatype columns and the binary data type.
 
    - ##### Sort Scope Configuration
    
@@ -331,7 +332,7 @@ CarbonData DDL statements are documented here,which includes:
 
    - ##### Caching Min/Max Value for Required Columns
 
-     By default, CarbonData caches min and max values of all the columns in schema.  As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. 
+     By default, CarbonData caches min and max values of all the columns in schema.  As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. This feature doesn't support the binary data type.
 
       Following are the valid values for COLUMN_META_CACHE:
       * If you want no column min/max values to be cached in the driver.
@@ -519,6 +520,7 @@ CarbonData DDL statements are documented here,which includes:
    - ##### Range Column
      This property is used to specify a column to partition the input data by range.
      Only one column can be configured. During data loading, you can use "global_sort_partitions" or "scale_factor" to avoid generating small files.
+     This feature doesn't support the binary data type.
 
      ```
      TBLPROPERTIES('RANGE_COLUMN'='col1')
@@ -916,7 +918,7 @@ Users can specify which columns to include and exclude for local dictionary gene
   PARTITIONED BY (productCategory STRING, productBatch STRING)
   STORED AS carbondata
   ```
-   **NOTE:** Hive partition is not supported on complex datatype columns.
+   **NOTE:** Hive partition is not supported on complex data type columns or the binary data type.
 
 
 #### Show Partitions
@@ -959,7 +961,7 @@ Users can specify which columns to include and exclude for local dictionary gene
 
 ### CARBONDATA PARTITION(HASH,RANGE,LIST) -- Alpha feature, this partition feature does not support update and delete data.
 
-  The partition supports three type:(Hash,Range,List), similar to other system's partition features, CarbonData's partition feature can be used to improve query performance by filtering on the partition column.
+  The partition supports three type:(Hash,Range,List), similar to other system's partition features, CarbonData's partition feature can be used to improve query performance by filtering on the partition column. The partition feature doesn't support the binary data type.
 
 ### Create Hash Partition Table
 
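A hedged, illustrative DDL sketch that combines the properties touched in these hunks, assuming the `spark` session from the earlier sketch and CarbonData SQL support on that session. Table and column names are hypothetical; per the notes above, the binary column `payload` is deliberately left out of SORT_COLUMNS, COLUMN_META_CACHE and RANGE_COLUMN.

```scala
// Illustrative DDL only: SORT_COLUMNS, COLUMN_META_CACHE and RANGE_COLUMN
// are set on non-binary columns, as the updated docs require.
spark.sql(
  """
    |CREATE TABLE IF NOT EXISTS sales_example (
    |  id INT,
    |  name STRING,
    |  city STRING,
    |  payload BINARY
    |)
    |STORED AS carbondata
    |TBLPROPERTIES (
    |  'SORT_COLUMNS'='name, city',
    |  'COLUMN_META_CACHE'='name',
    |  'RANGE_COLUMN'='id'
    |)
  """.stripMargin)
```
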
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
index 8361862..51e7f68 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
@@ -45,9 +45,9 @@ class TestLoadDataGeneral extends QueryTest with BeforeAndAfterEach {
 
   private def checkSegmentExists(
       segmentId: String,
-      datbaseName: String,
+      databaseName: String,
       tableName: String): Boolean = {
-    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(datbaseName, tableName)
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(databaseName, tableName)
     val partitionPath =
       CarbonTablePath.getPartitionDir(carbonTable.getAbsoluteTableIdentifier.getTablePath)
     val fileType: FileFactory.FileType = FileFactory.getFileType(partitionPath)
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
index a6bc30d..69334a0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
@@ -62,7 +62,7 @@ class CGDataMapFactory(
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   override def getDataMaps(segment: Segment): java.util.List[CoarseGrainDataMap] = {
     val path = identifier.getTablePath
@@ -98,7 +98,7 @@ class CGDataMapFactory(
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
index c75649a..449ffa0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
@@ -62,7 +62,7 @@ class C2DataMapFactory(
     new DataMapMeta(carbonTable.getIndexedColumns(dataMapSchema), List(ExpressionType.EQUALS).asJava)
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
index 99e0509..ff77820 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
@@ -61,7 +61,7 @@ class FGDataMapFactory(carbonTable: CarbonTable,
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   override def getDataMaps(segment: Segment): java.util.List[FineGrainDataMap] = {
     val path = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segment.getSegmentNo)
@@ -86,7 +86,7 @@ class FGDataMapFactory(carbonTable: CarbonTable,
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 1140e72..ac8224e 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -205,7 +205,7 @@ class NewCarbonDataLoadRDD[K, V](
        *
        * @return
        */
-      def gernerateBlocksID: String = {
+      def generateBlocksID: String = {
         carbonLoadModel.getDatabaseName + "_" + carbonLoadModel.getTableName + "_" +
         UUID.randomUUID()
       }
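
Aside on the renamed generateBlocksID: it builds an identifier of the shape `<database>_<table>_<uuid>`. A standalone sketch of that format, with hypothetical inputs:

```scala
import java.util.UUID

// Mirrors the "<database>_<table>_<uuid>" shape built by generateBlocksID.
def blocksId(databaseName: String, tableName: String): String =
  databaseName + "_" + tableName + "_" + UUID.randomUUID()

// e.g. blocksId("default", "sales") => "default_sales_6b0e..."
```
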
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index a2b2af6..703ae37 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -870,7 +870,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
                  !dictIncludeCols.exists(x => x.equalsIgnoreCase(field.column))) {
         noDictionaryDims :+= field.column
         dimFields += field
-      } else if (isDetectAsDimentionDataType(field.dataType.get)) {
+      } else if (isDetectAsDimensionDataType(field.dataType.get)) {
         dimFields += field
         // consider all String and binary cols as noDicitonaryDims by default
         if ((DataTypes.STRING.getName.equalsIgnoreCase(field.dataType.get)) ||
@@ -934,11 +934,11 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   }
 
   /**
-   * detect dimention data type
+   * detect dimension data type
    *
    * @param dimensionDatatype
    */
-  def isDetectAsDimentionDataType(dimensionDatatype: String): Boolean = {
+  def isDetectAsDimensionDataType(dimensionDatatype: String): Boolean = {
     val dimensionType = Array("string",
       "array",
       "struct",
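
Aside on the renamed isDetectAsDimensionDataType: it is a case-insensitive membership test against the data type names treated as dimensions. A standalone sketch of that check follows; only the entries visible in this hunk are listed, the real parser carries more.

```scala
// Partial, illustrative list: just the names visible in this hunk.
val dimensionTypeNames = Array("string", "array", "struct")

// Case-insensitive check mirroring isDetectAsDimensionDataType.
def isDimensionDataType(dataTypeName: String): Boolean =
  dimensionTypeNames.exists(_.equalsIgnoreCase(dataTypeName))

// isDimensionDataType("STRING") => true; isDimensionDataType("double") => false
```
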
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
index 5f62362..185058c 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
@@ -78,7 +78,8 @@ class SparkCarbonFileFormat extends FileFormat
 
   /**
    * If user does not provide schema while reading the data then spark calls this method to infer
-   * schema from the carbodata files. It reads the schema present in carbondata files and return it.
+   * schema from the carbondata files.
+   * It reads the schema present in carbondata files and returns it.
    */
   override def inferSchema(sparkSession: SparkSession,
       options: Map[String, String],
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index 450ead1..ff0b9e6 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -38,12 +38,12 @@ case class CarbonDictionaryCatalystDecoder(
   override def output: Seq[Attribute] = {
     child match {
       case l: LogicalRelation =>
-        // If the child is logical plan then firts update all dictionary attr with IntegerType
+        // If the child is logical plan then first update all dictionary attr with IntegerType
         val logicalOut =
           CarbonDictionaryDecoder.updateAttributes(child.output, relations, aliasMap)
         CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, aliasMap)
       case Filter(cond, l: LogicalRelation) =>
-        // If the child is logical plan then firts update all dictionary attr with IntegerType
+        // If the child is logical plan then first update all dictionary attr with IntegerType
         val logicalOut =
           CarbonDictionaryDecoder.updateAttributes(child.output, relations, aliasMap)
         CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, aliasMap)
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index b4ef1f0..41494c5 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -1041,7 +1041,7 @@ case class CarbonLoadDataCommand(
   }
 
   /**
-   * Create the logical plan for update scenario. Here we should drop the segmentid column from the
+   * Create the logical plan for update scenario. Here we should drop the segmentId column from the
    * input rdd.
    */
   private def getLogicalQueryForUpdate(
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index b84a7b0..c4415f8 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -532,7 +532,7 @@ object CarbonFilters {
           // read partitions directly from hive metastore using filters
           sparkSession.sessionState.catalog.listPartitionsByFilter(identifier, partitionFilters)
         } else {
-          // Read partitions alternatively by firts get all partitions then filter them
+          // Read partitions alternatively by first get all partitions then filter them
           sparkSession.sessionState.catalog.
             asInstanceOf[CarbonSessionCatalog].getPartitionsAlternate(
             partitionFilters,