You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by xu...@apache.org on 2018/12/31 07:35:20 UTC
[1/2] carbondata git commit: [CARBONDATA-3208] Remove unused
parameters, imports and optimize the spell errors
Repository: carbondata
Updated Branches:
refs/heads/master b0733ecbf -> 21330b825
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
index 3c50a18..301644f 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
@@ -16,8 +16,6 @@
*/
package org.apache.carbondata.spark.util
-import java.io.File
-
import scala.collection.JavaConverters._
import org.apache.spark.sql.common.util.Spark2QueryTest
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/spark/carbondata/BadRecordPathLoadOptionTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/BadRecordPathLoadOptionTest.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/BadRecordPathLoadOptionTest.scala
index e3e261f..1cec6ec 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/BadRecordPathLoadOptionTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/BadRecordPathLoadOptionTest.scala
@@ -17,18 +17,15 @@
package org.apache.spark.carbondata
-import java.io.File
-
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
+import org.apache.carbondata.core.datastore.impl.FileFactory
+import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.apache.spark.sql.hive.HiveContext
import org.scalatest.BeforeAndAfterAll
-import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
-import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
-import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.util.CarbonProperties
-
/**
* Test Class for detailed query on timestamp datatypes
*
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/spark/carbondata/datatype/NumericDimensionBadRecordTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/datatype/NumericDimensionBadRecordTest.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/datatype/NumericDimensionBadRecordTest.scala
index 44fea03..28f2e16 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/datatype/NumericDimensionBadRecordTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/datatype/NumericDimensionBadRecordTest.scala
@@ -17,8 +17,6 @@
package org.apache.carbondata.spark.testsuite.badrecordloger
-import java.io.File
-
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.apache.spark.sql.hive.HiveContext
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala
index 697b727..8d2955e 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/iud/DeleteCarbonTableSubqueryTestCase.scala
@@ -20,9 +20,6 @@ import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.scalatest.BeforeAndAfterAll
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
class DeleteCarbonTableSubqueryTestCase extends Spark2QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("use default")
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/Spark2QueryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/Spark2QueryTest.scala b/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/Spark2QueryTest.scala
index ff64c05..2448d3c 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/Spark2QueryTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/sql/common/util/Spark2QueryTest.scala
@@ -17,8 +17,7 @@
package org.apache.spark.sql.common.util
-import org.apache.spark.sql.CarbonSession
-import org.apache.spark.sql.hive.{CarbonHiveSessionCatalog, HiveExternalCatalog}
+import org.apache.spark.sql.hive.CarbonHiveSessionCatalog
import org.apache.spark.sql.test.util.QueryTest
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala b/integration/spark2/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
index 8ad28a3..c2f7a44 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
@@ -29,7 +29,6 @@ import org.apache.carbondata.api.CarbonStore
import org.apache.carbondata.common.constants.LoggerAction
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.CarbonMetadata
-import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonProperties
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
index 4c25ce3..68e8e22 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
@@ -126,7 +126,7 @@ public class CarbonRowDataWriterProcessorStepImpl extends AbstractDataLoadProces
measureCount = configuration.getMeasureCount();
outputLength = measureCount + (this.noDictWithComplextCount > 0 ? 1 : 0) + 1;
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
if (iterators.length == 1) {
@@ -218,10 +218,10 @@ public class CarbonRowDataWriterProcessorStepImpl extends AbstractDataLoadProces
}
}
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
if (null != exception) {
throw exception;
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterBatchProcessorStepImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterBatchProcessorStepImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterBatchProcessorStepImpl.java
index a0a845b..05b2424 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterBatchProcessorStepImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterBatchProcessorStepImpl.java
@@ -80,7 +80,7 @@ public class DataWriterBatchProcessorStepImpl extends AbstractDataLoadProcessorS
String tableName = tableIdentifier.getTableName();
try {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
int i = 0;
String[] storeLocation = getStoreLocation();
@@ -145,10 +145,10 @@ public class DataWriterBatchProcessorStepImpl extends AbstractDataLoadProcessorS
}
}
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
if (null != exception) {
throw exception;
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
index 6899886..7beca48 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
@@ -112,7 +112,7 @@ public class DataWriterProcessorStepImpl extends AbstractDataLoadProcessorStep {
String tableName = tableIdentifier.getTableName();
try {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
rangeExecutorService = Executors.newFixedThreadPool(iterators.length,
new CarbonThreadFactory("WriterForwardPool: " + tableName));
@@ -202,10 +202,10 @@ public class DataWriterProcessorStepImpl extends AbstractDataLoadProcessorStep {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance().recordTotalRecords(rowCounter.get());
processingComplete(dataHandler);
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
- .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+ .recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PARTITION_ID,
System.currentTimeMillis());
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
index a9a6085..b380888 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
@@ -335,7 +335,7 @@ public final class CarbonDataMergerUtil {
// create entry for merged one.
LoadMetadataDetails loadMetadataDetails = new LoadMetadataDetails();
- loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PATITION_ID);
+ loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PARTITION_ID);
loadMetadataDetails.setSegmentStatus(SegmentStatus.SUCCESS);
long loadEnddate = CarbonUpdateUtil.readCurrentTime();
loadMetadataDetails.setLoadEndTime(loadEnddate);
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortParameters.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortParameters.java b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortParameters.java
index 5d78f3f..09dd52f 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortParameters.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortParameters.java
@@ -495,7 +495,7 @@ public class SortParameters implements Serializable {
parameters.setCarbonTable(carbonTable);
parameters.setDatabaseName(databaseName);
parameters.setTableName(tableName);
- parameters.setPartitionID(CarbonTablePath.DEPRECATED_PATITION_ID);
+ parameters.setPartitionID(CarbonTablePath.DEPRECATED_PARTITION_ID);
parameters.setSegmentId(segmentId);
parameters.setTaskNo(taskNo);
parameters.setMeasureColCount(measureColCount);
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
index d6e4027..7cc8932 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
@@ -393,7 +393,7 @@ public class TablePage {
private EncodedColumnPage[] encodeAndCompressDimensions()
throws KeyGenException, IOException, MemoryException {
List<EncodedColumnPage> encodedDimensions = new ArrayList<>();
- List<EncodedColumnPage> encodedComplexDimenions = new ArrayList<>();
+ List<EncodedColumnPage> encodedComplexDimensions = new ArrayList<>();
TableSpec tableSpec = model.getTableSpec();
int dictIndex = 0;
int noDictIndex = 0;
@@ -435,7 +435,7 @@ public class TablePage {
case COMPLEX:
EncodedColumnPage[] encodedPages = ColumnPageEncoder.encodeComplexColumn(
complexDimensionPages[complexDimIndex++]);
- encodedComplexDimenions.addAll(Arrays.asList(encodedPages));
+ encodedComplexDimensions.addAll(Arrays.asList(encodedPages));
break;
default:
throw new IllegalArgumentException("unsupported dimension type:" + spec
@@ -443,7 +443,7 @@ public class TablePage {
}
}
- encodedDimensions.addAll(encodedComplexDimenions);
+ encodedDimensions.addAll(encodedComplexDimensions);
return encodedDimensions.toArray(new EncodedColumnPage[encodedDimensions.size()]);
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala b/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
index e279caf..cb12bb6 100644
--- a/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
+++ b/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
@@ -27,7 +27,6 @@ import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.processing.loading
import org.apache.carbondata.processing.loading.ComplexDelimitersEnum
import org.apache.carbondata.processing.loading.constants.DataLoadProcessorConstants
[2/2] carbondata git commit: [CARBONDATA-3208] Remove unused
parameters, imports and optimize the spell errors
Posted by xu...@apache.org.
[CARBONDATA-3208] Remove unused parameters, imports and optimize the spell errors
Remove unused parameters and imports, and fix spelling errors and other typos.
This closes #3036
Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/21330b82
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/21330b82
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/21330b82
Branch: refs/heads/master
Commit: 21330b825eaac39568508671aab810587440b716
Parents: b0733ec
Author: Oscar <ru...@163.com>
Authored: Fri Dec 28 07:24:06 2018 +0800
Committer: xubo245 <xu...@huawei.com>
Committed: Mon Dec 31 15:34:52 2018 +0800
----------------------------------------------------------------------
.../core/datastore/block/SegmentProperties.java | 20 ++---
.../core/datastore/page/ColumnPage.java | 6 +-
.../core/scan/partition/PartitionUtil.java | 2 +-
.../core/util/path/CarbonTablePath.java | 4 +-
.../datamap/lucene/LuceneDataMapWriter.java | 2 +-
.../apache/carbondata/mv/datamap/MVHelper.scala | 12 +--
.../mv/plans/util/BirdcageOptimizer.scala | 4 +-
.../hadoop/api/CarbonInputFormat.java | 3 +-
.../lucene/LuceneFineGrainDataMapSuite.scala | 2 +-
...codingSafeColumnPageForComplexDataType.scala | 6 --
...dingUnsafeColumnPageForComplexDataType.scala | 12 +--
.../dataload/TestLoadDataGeneral.scala | 2 +-
.../preaggregate/TestPreAggCreateCommand.scala | 4 -
.../VarcharDataTypesBasicTestCase.scala | 2 +-
.../carbondata/events/AlterTableEvents.scala | 1 -
.../carbondata/spark/CarbonSparkFactory.scala | 2 +-
.../carbondata/spark/PartitionFactory.scala | 2 +-
.../spark/load/GlobalSortHelper.scala | 2 -
.../spark/rdd/AlterTableLoadPartitionRDD.scala | 6 +-
.../spark/rdd/CarbonDropPartitionRDD.scala | 2 +-
.../spark/rdd/CarbonGlobalDictionaryRDD.scala | 12 +--
.../carbondata/spark/rdd/CarbonMergerRDD.scala | 8 +-
.../spark/rdd/NewCarbonDataLoadRDD.scala | 14 +--
.../spark/tasks/SortIndexWriterTask.scala | 1 -
.../spark/util/GlobalDictionaryUtil.scala | 11 +--
.../CarbonStreamingQueryListener.scala | 1 -
.../spark/sql/catalyst/CarbonDDLSqlParser.scala | 5 +-
.../spark/rdd/CarbonDataRDDFactory.scala | 2 +-
.../org/apache/spark/sql/CarbonSession.scala | 2 +-
.../org/apache/spark/sql/CarbonSource.scala | 2 +-
.../CarbonAlterTableFinishStreaming.scala | 2 +-
.../management/CarbonLoadDataCommand.scala | 4 +-
.../management/CarbonShowLoadsCommand.scala | 2 +-
.../spark/sql/execution/command/package.scala | 7 +-
...rbonAlterTableDropHivePartitionCommand.scala | 3 +-
.../strategy/CarbonLateDecodeStrategy.scala | 4 +-
.../apache/spark/sql/hive/CarbonMetaStore.scala | 2 +-
.../sql/hive/CarbonPreAggregateRules.scala | 38 ++++----
.../sql/optimizer/CarbonLateDecodeRule.scala | 4 +-
.../sql/parser/CarbonSpark2SqlParser.scala | 4 +-
.../spark/sql/parser/CarbonSparkSqlParser.scala | 1 -
.../spark/sql/CarbonToSparkAdapater.scala | 2 +-
.../spark/sql/CarbonToSparkAdapater.scala | 91 --------------------
.../apache/spark/sql/CarbonToSparkAdapter.scala | 90 +++++++++++++++++++
.../spark/sql/CarbonToSparkAdapater.scala | 2 +-
.../bloom/BloomCoarseGrainDataMapTestUtil.scala | 2 +-
.../InsertIntoCarbonTableSpark2TestCase.scala | 3 -
.../spark/util/DictionaryLRUCacheTestCase.scala | 2 -
.../BadRecordPathLoadOptionTest.scala | 11 +--
.../NumericDimensionBadRecordTest.scala | 2 -
.../iud/DeleteCarbonTableSubqueryTestCase.scala | 3 -
.../spark/sql/common/util/Spark2QueryTest.scala | 3 +-
.../apache/spark/util/CarbonCommandSuite.scala | 1 -
.../CarbonRowDataWriterProcessorStepImpl.java | 6 +-
.../steps/DataWriterBatchProcessorStepImpl.java | 6 +-
.../steps/DataWriterProcessorStepImpl.java | 6 +-
.../processing/merger/CarbonDataMergerUtil.java | 2 +-
.../sort/sortdata/SortParameters.java | 2 +-
.../carbondata/processing/store/TablePage.java | 6 +-
.../streaming/parser/RowStreamParserImp.scala | 1 -
60 files changed, 209 insertions(+), 257 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
index d507937..c79ea12 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
@@ -250,7 +250,7 @@ public class SegmentProperties {
int[] columnCardinality) {
ColumnSchema columnSchema = null;
// ordinal will be required to read the data from file block
- int dimensonOrdinal = 0;
+ int dimensionOrdinal = 0;
int measureOrdinal = -1;
// table ordinal is actually a schema ordinal this is required as
// cardinality array
@@ -287,31 +287,31 @@ public class SegmentProperties {
// if it is a columnar dimension participated in mdkey then added
// key ordinal and dimension ordinal
carbonDimension =
- new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++, -1);
+ new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++, -1);
}
// as complex type will be stored at last so once complex type started all the dimension
// will be added to complex type
else if (isComplexDimensionStarted || columnSchema.getDataType().isComplexType()) {
cardinalityIndexForComplexDimensionColumn.add(tableOrdinal);
carbonDimension =
- new CarbonDimension(columnSchema, dimensonOrdinal++, -1, ++complexTypeOrdinal);
+ new CarbonDimension(columnSchema, dimensionOrdinal++, -1, ++complexTypeOrdinal);
carbonDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
complexDimensions.add(carbonDimension);
isComplexDimensionStarted = true;
- int previouseOrdinal = dimensonOrdinal;
- dimensonOrdinal =
- readAllComplexTypeChildren(dimensonOrdinal, columnSchema.getNumberOfChild(),
+ int previousOrdinal = dimensionOrdinal;
+ dimensionOrdinal =
+ readAllComplexTypeChildren(dimensionOrdinal, columnSchema.getNumberOfChild(),
columnsInTable, carbonDimension, complexTypeOrdinal);
- int numberOfChildrenDimensionAdded = dimensonOrdinal - previouseOrdinal;
+ int numberOfChildrenDimensionAdded = dimensionOrdinal - previousOrdinal;
for (int i = 0; i < numberOfChildrenDimensionAdded; i++) {
cardinalityIndexForComplexDimensionColumn.add(++tableOrdinal);
}
- counter = dimensonOrdinal;
+ counter = dimensionOrdinal;
complexTypeOrdinal = assignComplexOrdinal(carbonDimension, complexTypeOrdinal);
continue;
} else {
// for no dictionary dimension
- carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, -1, -1);
+ carbonDimension = new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1);
numberOfNoDictionaryDimension++;
if (columnSchema.isSortColumn()) {
this.numberOfSortColumns++;
@@ -324,7 +324,7 @@ public class SegmentProperties {
}
counter++;
}
- lastDimensionColOrdinal = dimensonOrdinal;
+ lastDimensionColOrdinal = dimensionOrdinal;
dimColumnsCardinality = new int[cardinalityIndexForNormalDimensionColumn.size()];
complexDimColumnCardinality = new int[cardinalityIndexForComplexDimensionColumn.size()];
int index = 0;
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
index 51dfbf2..22c5536 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
@@ -746,7 +746,7 @@ public abstract class ColumnPage {
} else if (dataType == DataTypes.BYTE_ARRAY) {
return getLVFlattenedBytePage().length;
} else {
- throw new UnsupportedOperationException("unsupport compress column page: " + dataType);
+ throw new UnsupportedOperationException("unsupported compress column page: " + dataType);
}
}
@@ -785,7 +785,7 @@ public abstract class ColumnPage {
} else if (dataType == DataTypes.BYTE_ARRAY) {
return compressor.compressByte(getLVFlattenedBytePage());
} else {
- throw new UnsupportedOperationException("unsupport compress column page: " + dataType);
+ throw new UnsupportedOperationException("unsupported compress column page: " + dataType);
}
}
@@ -851,7 +851,7 @@ public abstract class ColumnPage {
CarbonCommonConstants.INT_SIZE_IN_BYTE, meta.getCompressorName());
} else {
throw new UnsupportedOperationException(
- "unsupport uncompress column page: " + meta.getStoreDataType());
+ "unsupported uncompress column page: " + meta.getStoreDataType());
}
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/core/src/main/java/org/apache/carbondata/core/scan/partition/PartitionUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/partition/PartitionUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/partition/PartitionUtil.java
index 676cf48..66ceeb8 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/partition/PartitionUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/partition/PartitionUtil.java
@@ -39,7 +39,7 @@ public class PartitionUtil {
return new RangePartitioner(partitionInfo);
default:
throw new UnsupportedOperationException(
- "unsupport partition type: " + partitionInfo.getPartitionType().name());
+ "unsupported partition type: " + partitionInfo.getPartitionType().name());
}
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
index f1df66a..8538e37 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
@@ -322,11 +322,11 @@ public class CarbonTablePath {
}
// This partition is not used in any code logic, just keep backward compatibility
- public static final String DEPRECATED_PATITION_ID = "0";
+ public static final String DEPRECATED_PARTITION_ID = "0";
public static String getPartitionDir(String tablePath) {
return getFactDir(tablePath) + File.separator + PARTITION_PREFIX +
- CarbonTablePath.DEPRECATED_PATITION_ID;
+ CarbonTablePath.DEPRECATED_PARTITION_ID;
}
public static String getFactDir(String tablePath) {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
index 9fd9409..1004753 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
@@ -346,7 +346,7 @@ public class LuceneDataMapWriter extends DataMapWriter {
} else if (type == DataTypes.BOOLEAN) {
value = page.getBoolean(rowId);
} else {
- LOGGER.error("unsupport data type " + type);
+ LOGGER.error("unsupported data type " + type);
throw new RuntimeException("unsupported data type " + type);
}
return value;
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
index 9da109a..4c7fbc4 100644
--- a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
@@ -22,7 +22,7 @@ import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
-import org.apache.spark.sql.{CarbonEnv, CarbonToSparkAdapater, SparkSession}
+import org.apache.spark.sql.{CarbonEnv, CarbonToSparkAdapter, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, Cast, Expression, NamedExpression, ScalaUDF, SortOrder}
@@ -300,7 +300,7 @@ object MVHelper {
case Alias(agg: AggregateExpression, name) =>
agg.aggregateFunction.collect {
case attr: AttributeReference =>
- CarbonToSparkAdapater.createAttributeReference(attr.name,
+ CarbonToSparkAdapter.createAttributeReference(attr.name,
attr.dataType,
attr.nullable,
attr.metadata,
@@ -317,7 +317,7 @@ object MVHelper {
expressions.map {
case alias@Alias(agg: AggregateExpression, name) =>
attrMap.get(AttributeKey(agg)).map { exp =>
- CarbonToSparkAdapater.createAliasRef(
+ CarbonToSparkAdapter.createAliasRef(
getAttribute(exp),
name,
alias.exprId,
@@ -329,7 +329,7 @@ object MVHelper {
case attr: AttributeReference =>
val uattr = attrMap.get(AttributeKey(attr)).map{a =>
if (keepAlias) {
- CarbonToSparkAdapater.createAttributeReference(a.name,
+ CarbonToSparkAdapter.createAttributeReference(a.name,
a.dataType,
a.nullable,
a.metadata,
@@ -343,7 +343,7 @@ object MVHelper {
uattr
case alias@Alias(expression: Expression, name) =>
attrMap.get(AttributeKey(expression)).map { exp =>
- CarbonToSparkAdapater
+ CarbonToSparkAdapter
.createAliasRef(getAttribute(exp), name, alias.exprId, alias.qualifier,
alias.explicitMetadata, Some(alias))
}.getOrElse(alias)
@@ -385,7 +385,7 @@ object MVHelper {
case attr: AttributeReference =>
val uattr = attrMap.get(AttributeKey(attr)).map{a =>
if (keepAlias) {
- CarbonToSparkAdapater
+ CarbonToSparkAdapter
.createAttributeReference(a.name,
a.dataType,
a.nullable,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
index e1e891a..42cf15c 100644
--- a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
@@ -17,7 +17,7 @@
package org.apache.carbondata.mv.plans.util
-import org.apache.spark.sql.CarbonToSparkAdapater
+import org.apache.spark.sql.CarbonToSparkAdapter
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer._
@@ -128,7 +128,7 @@ object BirdcageOptimizer extends RuleExecutor[LogicalPlan] {
// ConvertToLocalRelation,
// PropagateEmptyRelation) ::
Batch(
- "OptimizeCodegen", Once, CarbonToSparkAdapater.getOptimizeCodegenRule(conf): _*) ::
+ "OptimizeCodegen", Once, CarbonToSparkAdapter.getOptimizeCodegenRule(conf): _*) ::
Batch(
"RewriteSubquery", Once,
RewritePredicateSubquery,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index c35862a..9b43877 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -322,7 +322,8 @@ m filterExpression
ObjectSerializationUtil.convertObjectToString(new ArrayList<>(partitions));
configuration.set(PARTITIONS_TO_PRUNE, partitionString);
} catch (Exception e) {
- throw new RuntimeException("Error while setting patition information to Job" + partitions, e);
+ throw new RuntimeException(
+ "Error while setting partition information to Job" + partitions, e);
}
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 46ffefa..d9678be 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -22,8 +22,8 @@ import java.io.{File, PrintWriter}
import scala.util.Random
import org.apache.spark.SparkException
-import org.apache.spark.sql.{CarbonEnv, Row}
import org.apache.spark.sql.test.util.QueryTest
+import org.apache.spark.sql.Row
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandException, MalformedDataMapCommandException}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingSafeColumnPageForComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingSafeColumnPageForComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingSafeColumnPageForComplexDataType.scala
index 75d08bb..1729d66 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingSafeColumnPageForComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingSafeColumnPageForComplexDataType.scala
@@ -17,12 +17,6 @@
package org.apache.carbondata.spark.testsuite.dataload
-import java.io.File
-import java.sql.Timestamp
-
-import scala.collection.mutable
-
-import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeColumnPageForComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeColumnPageForComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeColumnPageForComplexDataType.scala
index 8375195..74c3778 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeColumnPageForComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeColumnPageForComplexDataType.scala
@@ -17,19 +17,13 @@
package org.apache.carbondata.spark.testsuite.dataload
-import java.io.{File, PrintWriter}
-import java.sql.Timestamp
-
-import scala.collection.mutable
-import scala.util.Random
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
+import java.io.File
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.integration.spark.testsuite.complexType.TestAdaptiveComplexType
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
/**
* Test class of Adaptive Encoding UnSafe Column Page with Complex Data type
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
index 02abb8d..561adba 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
@@ -26,7 +26,7 @@ import org.scalatest.BeforeAndAfterEach
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.metadata.{CarbonMetadata, SegmentFileStore}
+import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.spark.sql.test.util.QueryTest
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
index c039c0f..d70e179 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
@@ -22,10 +22,6 @@ import java.util
import java.util.concurrent.{Callable, ExecutorService, Executors, TimeUnit}
import scala.collection.JavaConverters._
-import scala.concurrent.ExecutionContext.Implicits.global
-import scala.concurrent.{Await, Future}
-import scala.concurrent.duration.Duration
-import scala.util.{Failure, Success}
import org.apache.spark.sql.{AnalysisException, CarbonDatasourceHadoopRelation, Row}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
index 3148cac..c84ee92 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
@@ -165,7 +165,7 @@ class VarcharDataTypesBasicTestCase extends QueryTest with BeforeAndAfterEach wi
assert(exceptionCaught.getMessage.contains("does not exist in table"))
}
- test("long_string_columns: columns cannot exist in patitions columns") {
+ test("long_string_columns: columns cannot exist in partitions columns") {
val exceptionCaught = intercept[MalformedCarbonCommandException] {
sql(
s"""
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
index c99237e..56f4d29 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
@@ -20,7 +20,6 @@ import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.execution.command._
-import org.apache.carbondata.core.indexstore.PartitionSpec
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonSparkFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonSparkFactory.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonSparkFactory.scala
index 3dd9903..d89b46d 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonSparkFactory.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonSparkFactory.scala
@@ -16,7 +16,7 @@
*/
package org.apache.carbondata.spark
-import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnIdentifier}
+import org.apache.carbondata.core.metadata.ColumnIdentifier
import org.apache.carbondata.core.metadata.schema.table.column.{CarbonDimension, ColumnSchema}
/**
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala
index 02cb60b..4da2d9a 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala
@@ -32,7 +32,7 @@ object PartitionFactory {
case PartitionType.LIST => new ListPartitioner(partitionInfo)
case PartitionType.RANGE => new RangePartitioner(partitionInfo)
case partitionType =>
- throw new CarbonDataLoadingException(s"Unsupport partition type: $partitionType")
+ throw new CarbonDataLoadingException(s"Unsupported partition type: $partitionType")
}
}
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
index 4e3fc88..7f80e3e 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
@@ -20,9 +20,7 @@ package org.apache.carbondata.spark.load
import org.apache.spark.Accumulator
import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
-import org.apache.carbondata.processing.loading.BadRecordsLogger
object GlobalSortHelper {
private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableLoadPartitionRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableLoadPartitionRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableLoadPartitionRDD.scala
index 4322359..fd2cd19 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableLoadPartitionRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableLoadPartitionRDD.scala
@@ -25,12 +25,12 @@ import org.apache.spark.sql.execution.command.AlterPartitionModel
import org.apache.spark.util.PartitionUtils
import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonMetadata}
+import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
import org.apache.carbondata.processing.loading.TableProcessingOperations
import org.apache.carbondata.processing.partition.spliter.RowResultProcessor
-import org.apache.carbondata.processing.util.{CarbonDataProcessorUtil, CarbonLoaderUtil}
+import org.apache.carbondata.processing.util.CarbonDataProcessorUtil
import org.apache.carbondata.spark.AlterPartitionResult
-import org.apache.carbondata.spark.util.{CommonUtil, Util}
+import org.apache.carbondata.spark.util.CommonUtil
class AlterTableLoadPartitionRDD[K, V](alterPartitionModel: AlterPartitionModel,
result: AlterPartitionResult[K, V],
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropPartitionRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropPartitionRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropPartitionRDD.scala
index efa92b0..e950167 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropPartitionRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropPartitionRDD.scala
@@ -28,7 +28,7 @@ import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.indexstore.PartitionSpec
import org.apache.carbondata.core.metadata.SegmentFileStore
-case class CarbonDropPartition(rddId: Int, val idx: Int, segment: Segment)
+case class CarbonDropPartition(rddId: Int, idx: Int, segment: Segment)
extends Partition {
override val index: Int = idx
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
index b67fc71..adfdaad 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
@@ -483,12 +483,12 @@ class CarbonGlobalDictionaryGenerateRDD(
}
/**
- * Set column dictionry patition format
+ * Set column dictionary partition format
*
* @param id partition id
* @param dimension current carbon dimension
*/
-class CarbonColumnDictPatition(id: Int, dimension: CarbonDimension)
+class CarbonColumnDictPartition(id: Int, dimension: CarbonDimension)
extends Partition {
override val index: Int = id
val preDefDictDimension: CarbonDimension = dimension
@@ -499,9 +499,8 @@ class CarbonColumnDictPatition(id: Int, dimension: CarbonDimension)
* Use external column dict to generate global dictionary
*
* @param carbonLoadModel carbon load model
- * @param sparkSession spark context
* @param table carbon table identifier
- * @param dimensions carbon dimenisons having predefined dict
+ * @param dimensions carbon dimensions having predefined dict
* @param dictFolderPath path of dictionary folder
*/
class CarbonColumnDictGenerateRDD(
@@ -518,14 +517,14 @@ class CarbonColumnDictGenerateRDD(
val primDimLength = primDimensions.length
val result = new Array[Partition](primDimLength)
for (i <- 0 until primDimLength) {
- result(i) = new CarbonColumnDictPatition(i, primDimensions(i))
+ result(i) = new CarbonColumnDictPartition(i, primDimensions(i))
}
result
}
override def internalCompute(split: Partition, context: TaskContext)
: Iterator[(Int, ColumnDistinctValues)] = {
- val theSplit = split.asInstanceOf[CarbonColumnDictPatition]
+ val theSplit = split.asInstanceOf[CarbonColumnDictPartition]
val primDimension = theSplit.preDefDictDimension
// read the column dict data
val preDefDictFilePath = carbonLoadModel.getPredefDictFilePath(primDimension)
@@ -580,4 +579,5 @@ class CarbonColumnDictGenerateRDD(
Array((distinctValues._1,
ColumnDistinctValues(distinctValues._2.toArray, 0L))).iterator
}
+
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index 5bcc49b..6b2ee67 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -22,11 +22,9 @@ import java.util
import java.util.{Collections, List}
import java.util.concurrent.atomic.AtomicInteger
-import scala.collection.JavaConverters._
import scala.collection.mutable
+import scala.collection.JavaConverters._
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark._
@@ -43,7 +41,7 @@ import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.datastore.block._
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.indexstore.PartitionSpec
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonMetadata, CarbonTableIdentifier}
+import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
import org.apache.carbondata.core.metadata.blocklet.DataFileFooter
import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
import org.apache.carbondata.core.mutate.UpdateVO
@@ -58,7 +56,7 @@ import org.apache.carbondata.processing.loading.model.CarbonLoadModel
import org.apache.carbondata.processing.merger._
import org.apache.carbondata.processing.util.{CarbonDataProcessorUtil, CarbonLoaderUtil}
import org.apache.carbondata.spark.MergeResult
-import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil, Util}
+import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil}
class CarbonMergerRDD[K, V](
@transient private val ss: SparkSession,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index f44bb8d..1140e72 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -79,7 +79,7 @@ class SparkPartitionLoader(model: CarbonLoadModel,
System.getProperty("user.dir") + '/' + "conf" + '/' + "carbon.properties")
}
CarbonTimeStatisticsFactory.getLoadStatisticsInstance.initPartitionInfo(
- CarbonTablePath.DEPRECATED_PATITION_ID)
+ CarbonTablePath.DEPRECATED_PARTITION_ID)
CarbonProperties.getInstance().addProperty("carbon.is.columnar.storage", "true")
CarbonProperties.getInstance().addProperty("carbon.dimension.split.value.in.columnar", "1")
CarbonProperties.getInstance().addProperty("carbon.is.fullyfilled.bits", "true")
@@ -128,7 +128,7 @@ class NewCarbonDataLoadRDD[K, V](
val uniqueLoadStatusId =
carbonLoadModel.getTableName + CarbonCommonConstants.UNDERSCORE + theSplit.index
try {
- loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PATITION_ID)
+ loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PARTITION_ID)
loadMetadataDetails.setSegmentStatus(SegmentStatus.SUCCESS)
val preFetch = CarbonProperties.getInstance().getProperty(CarbonCommonConstants
@@ -170,7 +170,7 @@ class NewCarbonDataLoadRDD[K, V](
// So print the data load statistics only in case of non failure case
if (SegmentStatus.LOAD_FAILURE != loadMetadataDetails.getSegmentStatus) {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance
- .printStatisticsInfo(CarbonTablePath.DEPRECATED_PATITION_ID)
+ .printStatisticsInfo(CarbonTablePath.DEPRECATED_PARTITION_ID)
}
}
@@ -259,7 +259,7 @@ class NewDataFrameLoaderRDD[K, V](
carbonLoadModel.getTableName + CarbonCommonConstants.UNDERSCORE + theSplit.index
try {
- loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PATITION_ID)
+ loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PARTITION_ID)
loadMetadataDetails.setSegmentStatus(SegmentStatus.SUCCESS)
carbonLoadModel.setTaskNo(String.valueOf(theSplit.index))
carbonLoadModel.setPreFetch(false)
@@ -305,7 +305,7 @@ class NewDataFrameLoaderRDD[K, V](
// So print the data load statistics only in case of non failure case
if (SegmentStatus.LOAD_FAILURE != loadMetadataDetails.getSegmentStatus) {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance
- .printStatisticsInfo(CarbonTablePath.DEPRECATED_PATITION_ID)
+ .printStatisticsInfo(CarbonTablePath.DEPRECATED_PARTITION_ID)
}
}
var finished = false
@@ -466,7 +466,7 @@ class PartitionTableDataLoaderRDD[K, V](
carbonLoadModel.getTableName + CarbonCommonConstants.UNDERSCORE + theSplit.index
try {
- loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PATITION_ID)
+ loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PARTITION_ID)
loadMetadataDetails.setSegmentStatus(SegmentStatus.SUCCESS)
carbonLoadModel.setTaskNo(String.valueOf(partitionInfo.getPartitionId(theSplit.index)))
carbonLoadModel.setPreFetch(false)
@@ -503,7 +503,7 @@ class PartitionTableDataLoaderRDD[K, V](
// So print the data load statistics only in case of non failure case
if (SegmentStatus.LOAD_FAILURE != loadMetadataDetails.getSegmentStatus) {
CarbonTimeStatisticsFactory.getLoadStatisticsInstance
- .printStatisticsInfo(CarbonTablePath.DEPRECATED_PATITION_ID)
+ .printStatisticsInfo(CarbonTablePath.DEPRECATED_PARTITION_ID)
}
}
var finished = false
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/SortIndexWriterTask.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/SortIndexWriterTask.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/SortIndexWriterTask.scala
index f212120..2d8ed1d 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/SortIndexWriterTask.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/SortIndexWriterTask.scala
@@ -17,7 +17,6 @@
package org.apache.carbondata.spark.tasks
import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
-import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnIdentifier}
import org.apache.carbondata.core.metadata.datatype.DataType
import org.apache.carbondata.core.service.CarbonCommonFactory
import org.apache.carbondata.core.writer.sortindex.{CarbonDictionarySortIndexWriter, CarbonDictionarySortInfo, CarbonDictionarySortInfoPreparator}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index 922eadb..c16d935 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -17,11 +17,10 @@
package org.apache.carbondata.spark.util
-import java.nio.charset.Charset
import java.util.regex.Pattern
-import scala.collection.JavaConverters._
import scala.collection.mutable
+import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.language.implicitConversions
import scala.util.control.Breaks.{break, breakable}
@@ -40,7 +39,7 @@ import org.apache.spark.sql._
import org.apache.spark.util.FileUtils
import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
@@ -48,17 +47,13 @@ import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTable
import org.apache.carbondata.core.metadata.datatype.DataTypes
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.column.{CarbonDimension, ColumnSchema}
-import org.apache.carbondata.core.reader.CarbonDictionaryReader
-import org.apache.carbondata.core.service.CarbonCommonFactory
import org.apache.carbondata.core.statusmanager.SegmentStatus
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.core.writer.CarbonDictionaryWriter
import org.apache.carbondata.processing.exception.DataLoadingException
import org.apache.carbondata.processing.loading.csvinput.{CSVInputFormat, StringArrayWritable}
import org.apache.carbondata.processing.loading.exception.NoRetryException
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
-import org.apache.carbondata.processing.util.CarbonLoaderUtil
import org.apache.carbondata.spark.CarbonSparkFactory
import org.apache.carbondata.spark.rdd._
import org.apache.carbondata.spark.tasks.{DictionaryWriterTask, SortIndexWriterTask}
@@ -387,7 +382,7 @@ object GlobalDictionaryUtil {
* @param table carbon table identifier
* @param colName user specified column name for predefined dict
* @param colDictPath column dictionary file path
- * @param parentDimName parent dimenion for complex type
+ * @param parentDimName parent dimension for complex type
*/
def setPredefineDict(carbonLoadModel: CarbonLoadModel,
dimensions: Array[CarbonDimension],
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/carbondata/streaming/CarbonStreamingQueryListener.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/streaming/CarbonStreamingQueryListener.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/streaming/CarbonStreamingQueryListener.scala
index ebb1a41..b61b9ee 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/streaming/CarbonStreamingQueryListener.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/streaming/CarbonStreamingQueryListener.scala
@@ -25,7 +25,6 @@ import org.apache.spark.sql.execution.streaming.{CarbonAppendableStreamSink, Str
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.locks.{CarbonLockFactory, ICarbonLock, LockUsage}
class CarbonStreamingQueryListener(spark: SparkSession) extends StreamingQueryListener {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 7d5c170..7feb51c 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -18,13 +18,10 @@
package org.apache.spark.sql.catalyst
import java.text.SimpleDateFormat
-import java.util.regex.{Matcher, Pattern}
import scala.collection.JavaConverters._
-import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, LinkedHashSet, Map}
import scala.language.implicitConversions
-import scala.util.Try
import scala.util.matching.Regex
import org.apache.hadoop.hive.ql.lib.Node
@@ -267,7 +264,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
}
/**
- * This will prepate the Model from the Tree details.
+ * This will prepare the Model from the Tree details.
*
* @param ifNotExistPresent
* @param dbName
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 5d03026..92d1791 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -744,7 +744,7 @@ object CarbonDataRDDFactory {
CarbonCommonConstants.UNDERSCORE +
(index + "_0")
- loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PATITION_ID)
+ loadMetadataDetails.setPartitionCount(CarbonTablePath.DEPRECATED_PARTITION_ID)
loadMetadataDetails.setLoadName(segId)
loadMetadataDetails.setSegmentStatus(SegmentStatus.LOAD_FAILURE)
carbonLoadModel.setSegmentId(segId)
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index 2283b89..7b1bf4c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -264,7 +264,7 @@ object CarbonSession {
// Register a successfully instantiated context to the singleton. This should be at the
// end of the class definition so that the singleton is updated only if there is no
// exception in the construction of the instance.
- CarbonToSparkAdapater.addSparkListener(sparkContext)
+ CarbonToSparkAdapter.addSparkListener(sparkContext)
session.streams.addListener(new CarbonStreamingQueryListener(session))
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
index 7f72d42..376d121 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
@@ -331,7 +331,7 @@ object CarbonSource {
properties,
query)
// updating params
- val updatedFormat = CarbonToSparkAdapater
+ val updatedFormat = CarbonToSparkAdapter
.getUpdatedStorageFormat(storageFormat, map, tablePath)
tableDesc.copy(storage = updatedFormat)
} else {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableFinishStreaming.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableFinishStreaming.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableFinishStreaming.scala
index 8df0217..c1e8029 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableFinishStreaming.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableFinishStreaming.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.command.management
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.execution.command.MetadataCommand
-import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
import org.apache.carbondata.streaming.segment.StreamSegment
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index cd4b0ae..67172af 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -885,7 +885,7 @@ case class CarbonLoadDataCommand(
// datatype is always int
val column = table.getColumnByName(table.getTableName, attr.name)
if (column.hasEncoding(Encoding.DICTIONARY)) {
- CarbonToSparkAdapater.createAttributeReference(attr.name,
+ CarbonToSparkAdapter.createAttributeReference(attr.name,
IntegerType,
attr.nullable,
attr.metadata,
@@ -893,7 +893,7 @@ case class CarbonLoadDataCommand(
attr.qualifier,
attr)
} else if (attr.dataType == TimestampType || attr.dataType == DateType) {
- CarbonToSparkAdapater.createAttributeReference(attr.name,
+ CarbonToSparkAdapter.createAttributeReference(attr.name,
LongType,
attr.nullable,
attr.metadata,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
index 4a35e6e..dd12f34 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.command.management
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.execution.command.{Checker, DataCommand}
-import org.apache.spark.sql.types.{StringType, TimestampType}
+import org.apache.spark.sql.types.StringType
import org.apache.carbondata.api.CarbonStore
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
index 8073f90..983e6b4 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
@@ -47,7 +47,7 @@ object Checker {
/**
* Operation that modifies metadata(schema, table_status, etc)
*/
-trait MetadataProcessOpeation {
+trait MetadataProcessOperation {
def processMetadata(sparkSession: SparkSession): Seq[Row]
// call this to throw exception when processMetadata failed
@@ -117,7 +117,8 @@ trait Auditable {
/**
* Command that modifies metadata(schema, table_status, etc) only without processing data
*/
-abstract class MetadataCommand extends RunnableCommand with MetadataProcessOpeation with Auditable {
+abstract class MetadataCommand
+ extends RunnableCommand with MetadataProcessOperation with Auditable {
override def run(sparkSession: SparkSession): Seq[Row] = {
runWithAudit(processMetadata, sparkSession)
}
@@ -138,7 +139,7 @@ abstract class DataCommand extends RunnableCommand with DataProcessOperation wit
* if process data failed.
*/
abstract class AtomicRunnableCommand
- extends RunnableCommand with MetadataProcessOpeation with DataProcessOperation with Auditable {
+ extends RunnableCommand with MetadataProcessOperation with DataProcessOperation with Auditable {
override def run(sparkSession: SparkSession): Seq[Row] = {
runWithAudit(spark => {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
index a4629f8..a9b581c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
@@ -22,7 +22,7 @@ import java.util.UUID
import scala.collection.JavaConverters._
-import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row, SparkSession}
+import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.execution.command.{AlterTableAddPartitionCommand, AlterTableDropPartitionCommand, AtomicRunnableCommand}
@@ -35,7 +35,6 @@ import org.apache.carbondata.core.locks.{ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.SegmentFileStore
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.events._
import org.apache.carbondata.spark.rdd.CarbonDropPartitionRDD
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
index da8e48b..a23a191 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonLateDecodeStrategy.scala
@@ -174,13 +174,13 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy {
if (names.nonEmpty) {
val partitionSet = AttributeSet(names
.map(p => relation.output.find(_.name.equalsIgnoreCase(p)).get))
- val partitionKeyFilters = CarbonToSparkAdapater
+ val partitionKeyFilters = CarbonToSparkAdapter
.getPartitionKeyFilter(partitionSet, filterPredicates)
// Update the name with lower case as it is case sensitive while getting partition info.
val updatedPartitionFilters = partitionKeyFilters.map { exp =>
exp.transform {
case attr: AttributeReference =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
attr.name.toLowerCase,
attr.dataType,
attr.nullable,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
index 9127f14..f97a8ae 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTable
import org.apache.carbondata.core.metadata.schema
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.format.{SchemaEvolutionEntry, TableInfo}
+import org.apache.carbondata.format.SchemaEvolutionEntry
/**
* Interface for CarbonMetaStore
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
index a8d7d22..9be0961 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
@@ -273,7 +273,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
case attr: AttributeReference =>
updatedExpression.find { p => p._1.sameRef(attr) } match {
case Some((_, childAttr)) =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
childAttr.name,
childAttr.dataType,
childAttr.nullable,
@@ -299,7 +299,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
case attr: AttributeReference =>
updatedExpression.find { p => p._1.sameRef(attr) } match {
case Some((_, childAttr)) =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
childAttr.name,
childAttr.dataType,
childAttr.nullable,
@@ -783,7 +783,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
val factAlias = factPlanExpForStreaming(name)
// create attribute reference object for each expression
val attrs = factAlias.map { factAlias =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
name,
alias.dataType,
alias.nullable,
@@ -797,7 +797,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
val updatedAggExp = getAggregateExpressionForAggregation(aggExp, attrs)
// same reference id will be used as it can be used by above nodes in the plan like
// sort, project, join
- CarbonToSparkAdapater.createAliasRef(
+ CarbonToSparkAdapter.createAliasRef(
updatedAggExp.head,
name,
alias.exprId,
@@ -805,7 +805,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
Option(alias.metadata),
Some(alias))
case alias@Alias(expression, name) =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
name,
alias.dataType,
alias.nullable,
@@ -915,7 +915,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
case attr: AttributeReference =>
newAggExp += attr
case exp: Expression =>
- newAggExp += CarbonToSparkAdapater.createAliasRef(
+ newAggExp += CarbonToSparkAdapter.createAliasRef(
exp,
"dummy_" + counter,
NamedExpression.newExprId)
@@ -942,7 +942,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
// get the new aggregate expression
val newAggExp = getAggFunctionForFactStreaming(aggExp)
val updatedExp = newAggExp.map { exp =>
- CarbonToSparkAdapater.createAliasRef(exp,
+ CarbonToSparkAdapter.createAliasRef(exp,
name,
NamedExpression.newExprId,
alias.qualifier,
@@ -955,7 +955,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
updatedExp
case alias@Alias(exp: Expression, name) =>
val newAlias = Seq(alias)
- val attr = CarbonToSparkAdapater.createAttributeReference(name,
+ val attr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
alias.metadata,
@@ -1122,7 +1122,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
private def removeQualifiers(expression: Expression) : Expression = {
expression.transform {
case attr: AttributeReference =>
- CarbonToSparkAdapater.createAttributeReference(
+ CarbonToSparkAdapter.createAttributeReference(
attr.name,
attr.dataType,
attr.nullable,
@@ -1395,7 +1395,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
attr,
attributes)
val newExpressionId = NamedExpression.newExprId
- val childTableAttr = CarbonToSparkAdapater.createAttributeReference(attr.name,
+ val childTableAttr = CarbonToSparkAdapter.createAttributeReference(attr.name,
childAttr.dataType,
childAttr.nullable,
childAttr.metadata,
@@ -1413,14 +1413,14 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
attr,
attributes)
val newExpressionId = NamedExpression.newExprId
- val parentTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val parentTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
alias.exprId,
alias.qualifier,
alias)
- val childTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val childTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
@@ -1452,7 +1452,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
val newExpressionId = NamedExpression.newExprId
// create a parent attribute reference which will be replaced on node which may be referred
// by node like sort join
- val parentTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val parentTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
@@ -1460,7 +1460,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
alias.qualifier,
alias)
// creating a child attribute reference which will be replaced
- val childTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val childTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
@@ -1477,7 +1477,7 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
// for streaming table
// create alias for aggregate table
val aggExpForStreaming = aggExp.map{ exp =>
- CarbonToSparkAdapater.createAliasRef(exp,
+ CarbonToSparkAdapter.createAliasRef(exp,
name,
NamedExpression.newExprId,
alias.qualifier,
@@ -1511,14 +1511,14 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
}
}
val newExpressionId = NamedExpression.newExprId
- val parentTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val parentTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
alias.exprId,
alias.qualifier,
alias)
- val childTableAttr = CarbonToSparkAdapater.createAttributeReference(name,
+ val childTableAttr = CarbonToSparkAdapter.createAttributeReference(name,
alias.dataType,
alias.nullable,
Metadata.empty,
@@ -1853,7 +1853,7 @@ case class CarbonPreAggregateDataLoadingRules(sparkSession: SparkSession)
// named expression list otherwise update the list and add it to set
if (!validExpressionsMap.contains(AggExpToColumnMappingModel(sumExp))) {
namedExpressionList +=
- CarbonToSparkAdapater.createAliasRef(expressions.head,
+ CarbonToSparkAdapter.createAliasRef(expressions.head,
name + "_ sum",
NamedExpression.newExprId,
alias.qualifier,
@@ -1865,7 +1865,7 @@ case class CarbonPreAggregateDataLoadingRules(sparkSession: SparkSession)
// named expression list otherwise update the list and add it to set
if (!validExpressionsMap.contains(AggExpToColumnMappingModel(countExp))) {
namedExpressionList +=
- CarbonToSparkAdapater.createAliasRef(expressions.last, name + "_ count",
+ CarbonToSparkAdapter.createAliasRef(expressions.last, name + "_ count",
NamedExpression.newExprId,
alias.qualifier,
Some(alias.metadata),
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index 36eb9ce..0f350b9 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -788,7 +788,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
p.transformAllExpressions {
case a@Alias(exp, _)
if !exp.deterministic && !exp.isInstanceOf[CustomDeterministicExpression] =>
- CarbonToSparkAdapater.createAliasRef(CustomDeterministicExpression(exp),
+ CarbonToSparkAdapter.createAliasRef(CustomDeterministicExpression(exp),
a.name,
a.exprId,
a.qualifier,
@@ -806,7 +806,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
f.transformAllExpressions {
case a@Alias(exp, _)
if !exp.deterministic && !exp.isInstanceOf[CustomDeterministicExpression] =>
- CarbonToSparkAdapater.createAliasRef(CustomDeterministicExpression(exp),
+ CarbonToSparkAdapter.createAliasRef(CustomDeterministicExpression(exp),
a.name,
a.exprId,
a.qualifier,
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index f50c240..d1023fa 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.parser
import scala.collection.mutable
import scala.language.implicitConversions
-import org.apache.spark.sql.{CarbonToSparkAdapater, DeleteRecords, UpdateTable}
+import org.apache.spark.sql.{CarbonToSparkAdapter, DeleteRecords, UpdateTable}
import org.apache.spark.sql.catalyst.{CarbonDDLSqlParser, TableIdentifier}
import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
import org.apache.spark.sql.catalyst.plans.logical._
@@ -480,7 +480,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
logicalPlan match {
case _: CarbonCreateTableCommand =>
ExplainCommand(logicalPlan, extended = isExtended.isDefined)
- case _ => CarbonToSparkAdapater.getExplainCommandObj
+ case _ => CarbonToSparkAdapter.getExplainCommandObj
}
}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index b71d035..357e1ec 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -24,7 +24,6 @@ import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, SqlBaseParser}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkSqlAstBuilder
-import org.apache.spark.sql.execution.command.PartitionerField
import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.util.CarbonException
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/spark2.1/org/apache/spark/sql/CarbonToSparkAdapater.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/CarbonToSparkAdapater.scala b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/CarbonToSparkAdapater.scala
index 52c27ee..69541eb 100644
--- a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/CarbonToSparkAdapater.scala
+++ b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/CarbonToSparkAdapater.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation
import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.types.{DataType, Metadata}
-object CarbonToSparkAdapater {
+object CarbonToSparkAdapter {
def addSparkListener(sparkContext: SparkContext) = {
sparkContext.addSparkListener(new SparkListener {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapater.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapater.scala b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapater.scala
deleted file mode 100644
index 244b097..0000000
--- a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapater.scala
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.spark.sql
-
-import java.net.URI
-
-import org.apache.spark.SparkContext
-import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
-import org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, AttributeSet, ExprId, Expression, ExpressionSet, NamedExpression}
-import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
-import org.apache.spark.sql.catalyst.optimizer.OptimizeCodegen
-import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation}
-import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.execution.command.ExplainCommand
-import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.types.{DataType, Metadata}
-
-object CarbonToSparkAdapater {
-
- def addSparkListener(sparkContext: SparkContext) = {
- sparkContext.addSparkListener(new SparkListener {
- override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
- SparkSession.setDefaultSession(null)
- SparkSession.sqlListener.set(null)
- }
- })
- }
-
- def createAttributeReference(name: String, dataType: DataType, nullable: Boolean,
- metadata: Metadata,exprId: ExprId, qualifier: Option[String],
- attrRef : NamedExpression): AttributeReference = {
- AttributeReference(
- name,
- dataType,
- nullable,
- metadata)(exprId, qualifier,attrRef.isGenerated)
- }
-
- def createAliasRef(child: Expression,
- name: String,
- exprId: ExprId = NamedExpression.newExprId,
- qualifier: Option[String] = None,
- explicitMetadata: Option[Metadata] = None,
- namedExpr: Option[NamedExpression] = None): Alias = {
- val isGenerated:Boolean = if (namedExpr.isDefined) {
- namedExpr.get.isGenerated
- } else {
- false
- }
- Alias(child, name)(exprId, qualifier, explicitMetadata,isGenerated)
- }
-
- def getExplainCommandObj() : ExplainCommand = {
- ExplainCommand(OneRowRelation)
- }
-
- def getPartitionKeyFilter(
- partitionSet: AttributeSet,
- filterPredicates: Seq[Expression]): ExpressionSet = {
- ExpressionSet(
- ExpressionSet(filterPredicates)
- .filter(_.references.subsetOf(partitionSet)))
- }
-
- def getOptimizeCodegenRule(conf :SQLConf): Seq[Rule[LogicalPlan]] = {
- Seq(OptimizeCodegen(conf))
- }
-
- def getUpdatedStorageFormat(storageFormat: CatalogStorageFormat,
- map: Map[String, String],
- tablePath: String): CatalogStorageFormat = {
- storageFormat.copy(properties = map, locationUri = Some(new URI(tablePath)))
- }
-}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapter.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapter.scala b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapter.scala
new file mode 100644
index 0000000..446b5a5
--- /dev/null
+++ b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/CarbonToSparkAdapter.scala
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.spark.sql
+
+import java.net.URI
+
+import org.apache.spark.SparkContext
+import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
+import org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat
+import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, AttributeSet, ExprId, Expression, ExpressionSet, NamedExpression}
+import org.apache.spark.sql.catalyst.optimizer.OptimizeCodegen
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.execution.command.ExplainCommand
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types.{DataType, Metadata}
+
+object CarbonToSparkAdapter {
+
+ def addSparkListener(sparkContext: SparkContext) = {
+ sparkContext.addSparkListener(new SparkListener {
+ override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
+ SparkSession.setDefaultSession(null)
+ SparkSession.sqlListener.set(null)
+ }
+ })
+ }
+
+ def createAttributeReference(name: String, dataType: DataType, nullable: Boolean,
+ metadata: Metadata,exprId: ExprId, qualifier: Option[String],
+ attrRef : NamedExpression): AttributeReference = {
+ AttributeReference(
+ name,
+ dataType,
+ nullable,
+ metadata)(exprId, qualifier,attrRef.isGenerated)
+ }
+
+ def createAliasRef(child: Expression,
+ name: String,
+ exprId: ExprId = NamedExpression.newExprId,
+ qualifier: Option[String] = None,
+ explicitMetadata: Option[Metadata] = None,
+ namedExpr: Option[NamedExpression] = None): Alias = {
+ val isGenerated:Boolean = if (namedExpr.isDefined) {
+ namedExpr.get.isGenerated
+ } else {
+ false
+ }
+ Alias(child, name)(exprId, qualifier, explicitMetadata,isGenerated)
+ }
+
+ def getExplainCommandObj() : ExplainCommand = {
+ ExplainCommand(OneRowRelation)
+ }
+
+ def getPartitionKeyFilter(
+ partitionSet: AttributeSet,
+ filterPredicates: Seq[Expression]): ExpressionSet = {
+ ExpressionSet(
+ ExpressionSet(filterPredicates)
+ .filter(_.references.subsetOf(partitionSet)))
+ }
+
+ def getOptimizeCodegenRule(conf :SQLConf): Seq[Rule[LogicalPlan]] = {
+ Seq(OptimizeCodegen(conf))
+ }
+
+ def getUpdatedStorageFormat(storageFormat: CatalogStorageFormat,
+ map: Map[String, String],
+ tablePath: String): CatalogStorageFormat = {
+ storageFormat.copy(properties = map, locationUri = Some(new URI(tablePath)))
+ }
+}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapater.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapater.scala b/integration/spark2/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapater.scala
index 4c4ce4d..9094dfe 100644
--- a/integration/spark2/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapater.scala
+++ b/integration/spark2/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapater.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{DataType, Metadata}
-object CarbonToSparkAdapater {
+object CarbonToSparkAdapter {
def addSparkListener(sparkContext: SparkContext) = {
sparkContext.addSparkListener(new SparkListener {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala
index fc7c5af..108c8cb 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapTestUtil.scala
@@ -6,7 +6,7 @@ import java.util.UUID
import scala.util.Random
import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{CarbonSession, DataFrame}
+import org.apache.spark.sql.DataFrame
object BloomCoarseGrainDataMapTestUtil extends QueryTest {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/21330b82/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala
index d332261..4ba4b63 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala
@@ -20,9 +20,6 @@ import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.scalatest.BeforeAndAfterAll
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
class InsertIntoCarbonTableSpark2TestCase extends Spark2QueryTest with BeforeAndAfterAll {
override def beforeAll: Unit = {
sql("drop table if exists OneRowTable")