Posted to commits@carbondata.apache.org by xu...@apache.org on 2019/06/25 04:53:27 UTC
[carbondata] branch master updated: [CARBONDATA-3398] Added a new Column to show Cache Location
This is an automated email from the ASF dual-hosted git repository.
xubo245 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new e7c96ab [CARBONDATA-3398] Added a new Column to show Cache Location
e7c96ab is described below
commit e7c96abef8e6ac0dcdb30ed5e43424a015565478
Author: kunal642 <ku...@gmail.com>
AuthorDate: Tue May 28 15:30:49 2019 +0530
[CARBONDATA-3398] Added a new Column to show Cache Location
Handled SHOW CACHE for the index server and MV datamaps
This closes #3259
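
In short: SHOW METACACHE previously printed banner rows such as "DRIVER CACHE" and "INDEX SERVER CACHE" to separate the two cache sections; this commit drops those banners and instead tags every output row with a new "Cache Location" column whose value is DRIVER or INDEX SERVER. A sketch of the table-level output after this change (Field/Size/Comment values taken from the tests below; real sizes will vary):

    Field       Size    Comment                  Cache Location
    Index       0 B     0/1 index files cached   DRIVER
    Dictionary  0 B                              DRIVER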
---
.../carbondata/core/metadata/SegmentFileStore.java | 2 +-
.../sql/commands/TestCarbonShowCacheCommand.scala | 38 +++++++-------
.../command/cache/CarbonShowCacheCommand.scala | 60 +++++++++++++---------
3 files changed, 56 insertions(+), 44 deletions(-)
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 2b02ea4..bc4f05b 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -461,7 +461,7 @@ public class SegmentFileStore {
* @param segmentFilePath
* @return
*/
- private static SegmentFile readSegmentFile(String segmentFilePath) throws IOException {
+ public static SegmentFile readSegmentFile(String segmentFilePath) throws IOException {
Gson gsonObjectToRead = new Gson();
DataInputStream dataInputStream = null;
BufferedReader buffReader = null;
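
The only core change: readSegmentFile is widened from private to public, presumably so that the show-cache/index-server code touched by this commit can read a segment file's metadata directly. A minimal sketch of a call site (the path below is hypothetical; segment files typically live under the table's Metadata/segments directory):

    import org.apache.carbondata.core.metadata.SegmentFileStore

    // readSegmentFile is now callable from outside SegmentFileStore.
    val segmentFile: SegmentFileStore.SegmentFile =
      SegmentFileStore.readSegmentFile(
        "/store/db1/t1/Metadata/segments/0_1558951249000.segmentfile")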
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
index eb4b769..0cc2727 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
@@ -124,17 +124,17 @@ class TestCarbonShowCacheCommand extends QueryTest with BeforeAndAfterAll {
sql(s"LOAD DATA INPATH '$resourcesPath/data.csv' INTO TABLE empTable")
sql("select count(*) from empTable").show()
var showCache = sql("SHOW METACACHE on table empTable").collect()
- assert(showCache(1).get(2).toString.equalsIgnoreCase("3/3 index files cached"))
+ assert(showCache(0).get(2).toString.equalsIgnoreCase("3/3 index files cached"))
sql("delete from table empTable where segment.id in(0)").show()
// check whether count(*) query invalidates the cache for the invalid segments
sql("select count(*) from empTable").show()
showCache = sql("SHOW METACACHE on table empTable").collect()
- assert(showCache(1).get(2).toString.equalsIgnoreCase("2/2 index files cached"))
+ assert(showCache(0).get(2).toString.equalsIgnoreCase("2/2 index files cached"))
sql("delete from table empTable where segment.id in(1)").show()
// check whether select * query invalidates the cache for the invalid segments
sql("select * from empTable").show()
showCache = sql("SHOW METACACHE on table empTable").collect()
- assert(showCache(1).get(2).toString.equalsIgnoreCase("1/1 index files cached"))
+ assert(showCache(0).get(2).toString.equalsIgnoreCase("1/1 index files cached"))
}
test("test external table show cache") {
@@ -181,12 +181,12 @@ class TestCarbonShowCacheCommand extends QueryTest with BeforeAndAfterAll {
sql("use cache_empty_db").collect()
val result1 = sql("show metacache").collect()
assertResult(2)(result1.length)
- assertResult(Row("cache_empty_db", "ALL", "0 B", "0 B", "0 B"))(result1(1))
+ assertResult(Row("cache_empty_db", "ALL", "0 B", "0 B", "0 B", "DRIVER"))(result1(1))
// Database with 3 tables but only 2 are in cache
sql("use cache_db").collect()
val result2 = sql("show metacache").collect()
- assertResult(5)(result2.length)
+ assertResult(4)(result2.length)
// Make sure PreAgg tables are not in SHOW METADATA
sql("use default").collect()
@@ -202,33 +202,33 @@ class TestCarbonShowCacheCommand extends QueryTest with BeforeAndAfterAll {
// Table with Index, Dictionary & Bloom filter
val result1 = sql("show metacache on table cache_1").collect()
- assertResult(4)(result1.length)
- assertResult("1/1 index files cached")(result1(1).getString(2))
- assertResult("bloomfilter")(result1(3).getString(2))
+ assertResult(3)(result1.length)
+ assertResult("1/1 index files cached")(result1(0).getString(2))
+ assertResult("bloomfilter")(result1(2).getString(2))
// Table with Index and Dictionary
val result2 = sql("show metacache on table cache_db.cache_2").collect()
- assertResult(3)(result2.length)
- assertResult("2/2 index files cached")(result2(1).getString(2))
- assertResult("0 B")(result2(2).getString(1))
+ assertResult(2)(result2.length)
+ assertResult("2/2 index files cached")(result2(0).getString(2))
+ assertResult("0 B")(result2(1).getString(1))
// Table not in cache
checkAnswer(sql("show metacache on table cache_db.cache_3"),
- Seq(Row("DRIVER CACHE","",""), Row("Index", "0 B", "0/1 index files cached"),
- Row("Dictionary", "0 B", "")))
+ Seq(Row("Index", "0 B", "0/1 index files cached", "DRIVER"),
+ Row("Dictionary", "0 B", "", "DRIVER")))
// Table with Index, Dictionary & PreAgg child table
val result4 = sql("show metacache on table default.cache_4").collect()
- assertResult(4)(result4.length)
- assertResult("1/1 index files cached")(result4(1).getString(2))
- assertResult("0 B")(result4(2).getString(1))
- assertResult("preaggregate")(result4(3).getString(2))
+ assertResult(3)(result4.length)
+ assertResult("1/1 index files cached")(result4(0).getString(2))
+ assertResult("0 B")(result4(1).getString(1))
+ assertResult("preaggregate")(result4(2).getString(2))
sql("use default").collect()
// Table with 5 index files
val result5 = sql("show metacache on table cache_5").collect()
- assertResult(3)(result5.length)
- assertResult("5/5 index files cached")(result5(1).getString(2))
+ assertResult(2)(result5.length)
+ assertResult("5/5 index files cached")(result5(0).getString(2))
}
}
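
The test updates follow mechanically from the banner-row removal: every expected row index shifts down by one (showCache(1) becomes showCache(0)), expected row counts drop by one, and expected rows grow a fourth value carrying the location. A sketch of the new assertion pattern, mirroring the first test above:

    val showCache = sql("SHOW METACACHE on table empTable").collect()
    // The Index row is now first; the "DRIVER CACHE" banner row no longer exists.
    assert(showCache(0).get(2).toString.equalsIgnoreCase("3/3 index files cached"))
    // Every row carries its origin in the new fourth column.
    assert(showCache(0).get(3).toString.equalsIgnoreCase("DRIVER"))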
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index e5cda69..795c063 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -58,12 +58,14 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
AttributeReference("Table", StringType, nullable = false)(),
AttributeReference("Index size", StringType, nullable = false)(),
AttributeReference("Datamap size", StringType, nullable = false)(),
- AttributeReference("Dictionary size", StringType, nullable = false)())
+ AttributeReference("Dictionary size", StringType, nullable = false)(),
+ AttributeReference("Cache Location", StringType, nullable = false)())
} else {
Seq(
AttributeReference("Field", StringType, nullable = false)(),
AttributeReference("Size", StringType, nullable = false)(),
- AttributeReference("Comment", StringType, nullable = false)())
+ AttributeReference("Comment", StringType, nullable = false)(),
+ AttributeReference("Cache Location", StringType, nullable = false)())
}
}
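
Both result schemas gain a trailing "Cache Location" attribute, so the two command variants now return:

    SHOW METACACHE             -> Database | Table | Index size | Datamap size | Dictionary size | Cache Location
    SHOW METACACHE ON TABLE t  -> Field | Size | Comment | Cache Location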
@@ -95,15 +97,14 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
indexRawResults.drop(2).map { row =>
Row(row.get(0), row.getLong(1) + row.getLong(2), row.get(3))
}
- Seq(Row("DRIVER CACHE", "", "")) ++ result.map {
+ result.map {
row =>
- Row(row.get(0), bytesToDisplaySize(row.getLong(1)), row.get(2))
+ Row(row.get(0), bytesToDisplaySize(row.getLong(1)), row.get(2), "DRIVER")
} ++ (serverResults match {
case Nil => Seq()
case list =>
- Seq(Row("-----------", "-----------", "-----------"), Row("INDEX CACHE", "", "")) ++
list.map {
- row => Row(row.get(0), bytesToDisplaySize(row.getLong(1)), row.get(2))
+ row => Row(row.get(0), bytesToDisplaySize(row.getLong(1)), row.get(2), "INDEX SERVER")
}
})
}
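
This hunk is where the banners are actually dropped: instead of prefixing the driver section with Row("DRIVER CACHE", "", "") and the index-server section with a dashed separator plus Row("INDEX CACHE", "", ""), each displayed row now gets its location appended. The combined result keeps driver rows first, followed by any index-server rows, e.g. (sizes illustrative):

    Field  Size    Comment                 Cache Location
    Index  1.0 KB  1/1 index files cached  DRIVER
    Index  1.0 KB  1/1 index files cached  INDEX SERVER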
@@ -156,8 +157,11 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
driverRows)
val (indexdbIndexSize, indexdbDatamapSize, indexAllDictSize) = calculateDBIndexAndDatamapSize(
indexServerRows)
- val (indexAllIndexSize, indexAllDatamapSize) = getIndexServerCacheSizeForCurrentDB
-
+ val (indexAllIndexSize, indexAllDatamapSize) = if (isDistributedPruningEnabled) {
+ getIndexServerCacheSizeForCurrentDB
+ } else {
+ (0, 0)
+ }
val driverDisplayRows = if (cache != null) {
val tablePaths = carbonTables.map {
carbonTable =>
@@ -166,15 +170,19 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
val (driverIndexSize, driverDatamapSize, allDictSize) = getAllDriverCacheSize(tablePaths
.toList)
if (driverRows.nonEmpty) {
- val rows = (Seq(
- Row("ALL", "ALL", driverIndexSize, driverDatamapSize, allDictSize),
- Row(currentDatabase, "ALL", driverdbIndexSize, driverdbDatamapSize, driverdbDictSize)
+ (Seq(
+ Row("ALL", "ALL", driverIndexSize, driverDatamapSize, allDictSize, "DRIVER"),
+ Row(currentDatabase,
+ "ALL",
+ driverdbIndexSize,
+ driverdbDatamapSize,
+ driverdbDictSize,
+ "DRIVER")
) ++ driverRows).collect {
case row if row.getLong(2) != 0L || row.getLong(3) != 0L || row.getLong(4) != 0L =>
Row(row(0), row(1), bytesToDisplaySize(row.getLong(2)),
- bytesToDisplaySize(row.getLong(3)), bytesToDisplaySize(row.getLong(4)))
+ bytesToDisplaySize(row.getLong(3)), bytesToDisplaySize(row.getLong(4)), "DRIVER")
}
- Seq(Row("DRIVER CACHE", "", "", "", "")) ++ rows
} else {
makeEmptyCacheRows(currentDatabase)
}
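
Two things happen in the hunks above. First, the Seq(Row("DRIVER CACHE", "", "", "", "")) prefix disappears from the database-level output, with each surviving row tagged "DRIVER" instead. Second, getIndexServerCacheSizeForCurrentDB is now only called when distributed pruning is enabled, presumably so SHOW METACACHE does not try to contact an index server that is not running. Restated as a sketch:

    // Only ask the index server for its cache size when distributed pruning
    // is on; otherwise report zeros for the index-server totals.
    val (indexAllIndexSize, indexAllDatamapSize) =
      if (isDistributedPruningEnabled) getIndexServerCacheSizeForCurrentDB
      else (0, 0)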
@@ -184,15 +192,19 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
// val (serverIndexSize, serverDataMapSize) = getAllIndexServerCacheSize
val indexDisplayRows = if (indexServerRows.nonEmpty) {
- val rows = (Seq(
- Row("ALL", "ALL", indexAllIndexSize, indexAllDatamapSize, indexAllDictSize),
- Row(currentDatabase, "ALL", indexdbIndexSize, indexdbDatamapSize, driverdbDictSize)
+ (Seq(
+ Row("ALL", "ALL", indexAllIndexSize, indexAllDatamapSize, indexAllDictSize, "INDEX SERVER"),
+ Row(currentDatabase,
+ "ALL",
+ indexdbIndexSize,
+ indexdbDatamapSize,
+ driverdbDictSize,
+ "INDEX SERVER")
) ++ indexServerRows).collect {
case row if row.getLong(2) != 0L || row.getLong(3) != 0L || row.getLong(4) != 0L =>
Row(row.get(0), row.get(1), bytesToDisplaySize(row.getLong(2)),
- bytesToDisplaySize(row.getLong(3)), bytesToDisplaySize(row.getLong(4)))
+ bytesToDisplaySize(row.getLong(3)), bytesToDisplaySize(row.getLong(4)), "INDEX SERVER")
}
- Seq(Row("INDEX SERVER CACHE", "", "", "", "")) ++ rows
} else {
Seq()
}
@@ -237,13 +249,13 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
comments += " (external table)"
}
Seq(
- Row("Index", parentMetaCacheInfo._3, comments),
- Row("Dictionary", parentDictionary, "")
+ Row("Index", parentMetaCacheInfo._3, comments, ""),
+ Row("Dictionary", parentDictionary, "", "")
) ++ childMetaCacheInfos
} else {
Seq(
- Row("Index", 0L, ""),
- Row("Dictionary", 0L, "")
+ Row("Index", 0L, "", ""),
+ Row("Dictionary", 0L, "", "")
)
}
}
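
At the table level, the raw Index/Dictionary rows also grow a fourth field, but here it is an empty placeholder ("") rather than DRIVER or INDEX SERVER; this keeps the row arity consistent with the widened four-column schema, and the concrete location appears to be attached later on the display path:

    // Raw table-level rows (this hunk): location slot kept empty.
    Row("Index", parentMetaCacheInfo._3, comments, "")
    // Displayed rows (earlier hunk): location filled in explicitly.
    Row(row.get(0), bytesToDisplaySize(row.getLong(1)), row.get(2), "DRIVER")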
@@ -252,9 +264,9 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
private def makeEmptyCacheRows(currentDatabase: String) = {
Seq(
- Row("ALL", "ALL", bytesToDisplaySize(0), bytesToDisplaySize(0), bytesToDisplaySize(0)),
+ Row("ALL", "ALL", bytesToDisplaySize(0), bytesToDisplaySize(0), bytesToDisplaySize(0), ""),
Row(currentDatabase, "ALL", bytesToDisplaySize(0), bytesToDisplaySize(0),
- bytesToDisplaySize(0)))
+ bytesToDisplaySize(0), "DRIVER"))
}
private def calculateDBIndexAndDatamapSize(rows: Seq[Row]): (Long, Long, Long) = {