Posted to commits@carbondata.apache.org by ra...@apache.org on 2019/03/25 05:26:35 UTC

[carbondata] branch master updated: [CARBONDATA-3322] [CARBONDATA-3323] Added check for invalid tables in ShowCacheCommand & Standard output on ShowCacheCommand on table

This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 5c52876  [CARBONDATA-3322] [CARBONDATA-3323] Added check for invalid tables in ShowCacheCommand & Standard output on ShowCacheCommand on table
5c52876 is described below

commit 5c528765f2be7bcb326cc7d47a2a86b313e54832
Author: namanrastogi <na...@gmail.com>
AuthorDate: Thu Mar 21 20:33:55 2019 +0530

    [CARBONDATA-3322] [CARBONDATA-3323] Added check for invalid tables in ShowCacheCommand & Standard output on ShowCacheCommand on table
    
    Problem 1:
    After a table is renamed from t1 to t2, SHOW METACACHE ON TABLE works for both the old name t1 and the new name t2.
    Fix:
    Added a check that the table still exists in the metastore.
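
    For illustration, a minimal sketch of the old behaviour (assuming a
    SparkSession `spark` with CarbonData extensions; the table names are
    examples only):

        spark.sql("ALTER TABLE t1 RENAME TO t2")
        spark.sql("SHOW METACACHE ON TABLE t2") // works: t2 exists
        spark.sql("SHOW METACACHE ON TABLE t1") // previously still worked on
                                                // the stale name; now fails
                                                // the table existence check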
    
    Problem 2:
    When SHOW METACACHE ON TABLE is executed and carbonLRUCache is null, the output is an empty sequence, which is not the standard format.
    Fix:
    Return the standard output even when carbonLRUCache is not initialised (null), with the index and dictionary sizes reported as 0.
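
    For example, with an uninitialised cache the command now returns the
    two standard rows (the index-file count below is illustrative):

        Index       0   0/8 index files cached
        Dictionary  0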
    
    This closes #3157
---
 .../command/cache/CarbonShowCacheCommand.scala     | 31 +++++++++++++++++-----
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index e19ee48..8461bf3 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -23,8 +23,9 @@ import scala.collection.JavaConverters._
 import org.apache.hadoop.mapred.JobConf
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.catalyst.expressions.AttributeReference
-import org.apache.spark.sql.execution.command.MetadataCommand
+import org.apache.spark.sql.execution.command.{Checker, MetadataCommand}
 import org.apache.spark.sql.types.StringType
 
 import org.apache.carbondata.core.cache.{CacheProvider, CacheType}
@@ -64,7 +65,7 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
 
   def getAllTablesCache(sparkSession: SparkSession): Seq[Row] = {
     val currentDatabase = sparkSession.sessionState.catalog.getCurrentDatabase
-    val cache = CacheProvider.getInstance().getCarbonCache()
+    val cache = CacheProvider.getInstance().getCarbonCache
     if (cache == null) {
       Seq(
         Row("ALL", "ALL", 0L, 0L, 0L),
@@ -74,6 +75,7 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
         .listAllTables(sparkSession).filter {
         carbonTable =>
           carbonTable.getDatabaseName.equalsIgnoreCase(currentDatabase) &&
+          isValidTable(carbonTable, sparkSession) &&
           !carbonTable.isChildDataMap
       }
 
@@ -131,6 +133,18 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
 
   def getTableCache(sparkSession: SparkSession, carbonTable: CarbonTable): Seq[Row] = {
     val cache = CacheProvider.getInstance().getCarbonCache
+    val allIndexFiles: List[String] = CacheUtil.getAllIndexFiles(carbonTable)
+    if (cache == null) {
+      var comments = 0 + "/" + allIndexFiles.size + " index files cached"
+      if (!carbonTable.isTransactionalTable) {
+        comments += " (external table)"
+      }
+      return Seq(
+        Row("Index", 0L, comments),
+        Row("Dictionary", 0L, "")
+      )
+    }
+
     val showTableCacheEvent = ShowTableCacheEvent(carbonTable, sparkSession, internalCall)
     val operationContext = new OperationContext
     // datamapName -> (datamapProviderName, indexSize, datamapSize)
@@ -138,8 +152,7 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
     operationContext.setProperty(carbonTable.getTableUniqueName, currentTableSizeMap)
     OperationListenerBus.getInstance.fireEvent(showTableCacheEvent, operationContext)
 
-    // Get all Index files for the specified table.
-    val allIndexFiles: List[String] = CacheUtil.getAllIndexFiles(carbonTable)
+    // Get all Index files for the specified table in cache
     val indexFilesInCache: List[String] = allIndexFiles.filter {
       indexFile =>
         cache.get(indexFile) != null
@@ -190,9 +203,8 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
        * Assemble result for table
        */
       val carbonTable = CarbonEnv.getCarbonTable(tableIdentifier.get)(sparkSession)
-      if (CacheProvider.getInstance().getCarbonCache == null) {
-        return Seq.empty
-      }
+      Checker
+        .validateTableExists(tableIdentifier.get.database, tableIdentifier.get.table, sparkSession)
       val rawResult = getTableCache(sparkSession, carbonTable)
       val result = rawResult.slice(0, 2) ++
                    rawResult.drop(2).map {
@@ -205,4 +217,9 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
       }
     }
   }
+
+  def isValidTable(carbonTable: CarbonTable, sparkSession: SparkSession): Boolean = {
+    CarbonEnv.getInstance(sparkSession).carbonMetaStore.tableExists(carbonTable.getTableName,
+      Some(carbonTable.getDatabaseName))(sparkSession)
+  }
 }
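
A hypothetical smoke test for the standardised output (a sketch assuming a
SparkSession `spark` with CarbonData extensions and an existing carbon table
`demo`; the Row shapes follow the diff above):

    // Even before any query has populated the LRU cache,
    // SHOW METACACHE ON TABLE should return the two standard rows.
    val rows = spark.sql("SHOW METACACHE ON TABLE demo").collect()
    assert(rows(0).getString(0) == "Index")       // comment column reads
                                                  // e.g. "0/3 index files cached"
    assert(rows(1).getString(0) == "Dictionary")  // dictionary size is 0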