Posted to issues@carbondata.apache.org by GitBox <gi...@apache.org> on 2019/03/27 06:24:29 UTC

[GitHub] [carbondata] qiuchenjian commented on a change in pull request #3164: [CARBONDATA-3331] Fix for external table in Show Metacache

URL: https://github.com/apache/carbondata/pull/3164#discussion_r269418645
 
 

 ##########
 File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
 ##########
 @@ -71,52 +65,68 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
         Row("ALL", "ALL", 0L, 0L, 0L),
         Row(currentDatabase, "ALL", 0L, 0L, 0L))
     } else {
-      val carbonTables = CarbonEnv.getInstance(sparkSession).carbonMetaStore
-        .listAllTables(sparkSession).filter {
-        carbonTable =>
-          carbonTable.getDatabaseName.equalsIgnoreCase(currentDatabase) &&
-          isValidTable(carbonTable, sparkSession) &&
-          !carbonTable.isChildDataMap
+      val carbonTables = sparkSession.sessionState.catalog.listTables(currentDatabase).collect {
+        case tableIdent if CarbonEnv.getInstance(sparkSession).carbonMetaStore
+          .tableExists(tableIdent)(sparkSession) =>
+          CarbonEnv.getCarbonTable(tableIdent)(sparkSession)
       }
 
       // All tables of current database
-      var (dbIndexSize, dbDatamapSize, dbDictSize) = (0L, 0L, 0L)
-      val tableList: Seq[Row] = carbonTables.map {
+      var (dbDatamapSize, dbDictSize) = (0L, 0L)
+      val tableList = carbonTables.flatMap {
         carbonTable =>
-          val tableResult = getTableCache(sparkSession, carbonTable)
-          var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
-          tableResult.drop(2).foreach {
-            row =>
-              indexSize += row.getLong(1)
-              datamapSize += row.getLong(2)
-          }
-          val dictSize = tableResult(1).getLong(1)
-
-          dbIndexSize += indexSize
-          dbDictSize += dictSize
-          dbDatamapSize += datamapSize
-
-          val tableName = if (!carbonTable.isTransactionalTable) {
-            carbonTable.getTableName + " (external table)"
-          }
-          else {
-            carbonTable.getTableName
+          try {
+            val tableResult = getTableCache(sparkSession, carbonTable)
+            var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
+            tableResult.drop(2).foreach {
+              row =>
+                indexSize += row.getLong(1)
+                datamapSize += row.getLong(2)
+            }
+            val dictSize = tableResult(1).getLong(1)
+
+            dbDictSize += dictSize
+            dbDatamapSize += datamapSize
+
+            val tableName = if (!carbonTable.isTransactionalTable) {
+              carbonTable.getTableName + " (external table)"
+            }
+            else {
+              carbonTable.getTableName
+            }
+            Seq((currentDatabase, tableName, indexSize, datamapSize, dictSize))
+          } catch {
+            case ex: UnsupportedOperationException =>
+              Seq.empty
           }
-          (currentDatabase, tableName, indexSize, datamapSize, dictSize)
       }.collect {
         case (db, table, indexSize, datamapSize, dictSize) if !((indexSize == 0) &&
                                                                 (datamapSize == 0) &&
                                                                 (dictSize == 0)) =>
           Row(db, table, indexSize, datamapSize, dictSize)
       }
 
+      val tablePaths = carbonTables.map {
+        carbonTable =>
+          carbonTable.getTablePath
+      }
+
       // Scan whole cache and fill the entries for All-Database-All-Tables
+      // and Current-Database-All-Tables
       var (allIndexSize, allDatamapSize, allDictSize) = (0L, 0L, 0L)
+      var dbIndexSize = 0L
       cache.getCacheMap.asScala.foreach {
-        case (_, cacheable) =>
+        case (key, cacheable) =>
           cacheable match {
             case _: BlockletDataMapIndexWrapper =>
               allIndexSize += cacheable.getMemorySize
+              val tablePath = tablePaths.find {
 
 Review comment:
   No need for the temporary `tablePath` variable; since only whether a match exists is used, `exists` is better than `find` here.
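   
   A minimal sketch of the suggestion (the `find` predicate is truncated in the diff above, so the `key.startsWith(path)` match condition and the sample values below are assumptions for illustration only):
   
   ```scala
   // Sketch: prefer exists over a temporary variable plus find when only
   // a match/no-match answer is needed. Paths and key are made-up samples.
   val tablePaths: Seq[String] = Seq("/warehouse/db/t1", "/warehouse/db/t2")
   val key = "/warehouse/db/t1/Fact/Part0/Segment_0/index"
   
   // Current PR: temporary variable holding an Option, then a definedness check
   val tablePath = tablePaths.find(path => key.startsWith(path))
   val matchedViaFind = tablePath.isDefined
   
   // Suggested: exists answers the same question directly and short-circuits
   val matchedViaExists = tablePaths.exists(path => key.startsWith(path))
   
   assert(matchedViaFind == matchedViaExists)
   ```
   
   `exists` also reads as intent: the loop over the cache map only needs to know whether the key belongs to one of the current database's tables, not which one.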
