Posted to commits@kylin.apache.org by ni...@apache.org on 2019/06/24 05:32:21 UTC
[kylin] branch master updated: KYLIN-4054 Logger of HCreateTableJob record error message
This is an automated email from the ASF dual-hosted git repository.
nic pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git
The following commit(s) were added to refs/heads/master by this push:
new 9a72b9e KYLIN-4054 Logger of HCreateTableJob record error message
9a72b9e is described below
commit 9a72b9efe88de98cf6f3abc47b7fee235db2b10a
Author: hit-lacus <hi...@126.com>
AuthorDate: Mon Jun 24 11:49:23 2019 +0800
KYLIN-4054 Logger of HCreateTableJob record error message
---
.../common/persistence/JDBCConnectionManager.java | 4 +--
.../storage/gtrecord/GTCubeStorageQueryBase.java | 32 ++++++++++++----------
.../kylin/storage/hbase/steps/CreateHTableJob.java | 27 +++++++++---------
.../org/apache/kylin/tool/CubeMetaIngester.java | 8 +++---
4 files changed, 36 insertions(+), 35 deletions(-)
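For context on the diff below: Kylin logs through SLF4J, whose message formatter only recognizes the anonymous {} anchor. MessageFormat-style indices such as {0}/{1} and printf-style %s are passed through untouched, so before this commit the affected statements printed the placeholder literally instead of the value. A minimal sketch of the three behaviors (the PlaceholderDemo class is hypothetical, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical demo class, not part of the commit.
    public class PlaceholderDemo {
        private static final Logger logger = LoggerFactory.getLogger(PlaceholderDemo.class);

        public static void main(String[] args) {
            long filterMask = 42L;
            // MessageFormat-style index: SLF4J leaves it untouched.
            logger.info("Filter mask is: {0}", filterMask); // logs "Filter mask is: {0}"
            // printf-style conversion: also left untouched.
            logger.info("Filter mask is: %s", filterMask);  // logs "Filter mask is: %s"
            // SLF4J's anonymous anchor: substituted with the argument.
            logger.info("Filter mask is: {}", filterMask);  // logs "Filter mask is: 42"
        }
    }
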
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
index dcb9a1b..3ca4026 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
@@ -62,7 +62,7 @@ public class JDBCConnectionManager {
dataSource = BasicDataSourceFactory.createDataSource(getDbcpProperties());
Connection conn = getConn();
DatabaseMetaData mdm = conn.getMetaData();
- logger.info("Connected to {0} {1}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion());
+ logger.info("Connected to {} {}", mdm.getDatabaseProductName(), mdm.getDatabaseProductVersion());
closeQuietly(conn);
} catch (Exception e) {
throw new IllegalArgumentException(e);
@@ -89,7 +89,7 @@ public class JDBCConnectionManager {
ret.remove("passwordEncrypted");
}
- logger.info("Connecting to Jdbc with url:{0} by user {1}", ret.get("url"), ret.get("username"));
+ logger.info("Connecting to Jdbc with url:{} by user {}", ret.get("url"), ret.get("username"));
putIfMissing(ret, "driverClassName", "com.mysql.jdbc.Driver");
putIfMissing(ret, "maxActive", "5");
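One side effect of moving from indexed to anonymous placeholders, visible in the two JDBCConnectionManager lines above: each {} consumes the next argument strictly left to right, so unlike MessageFormat's {0}/{1} an argument cannot be reordered or referenced twice in the format string. A small sketch (ConnectionLogDemo is hypothetical, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical demo class, not part of the commit.
    public class ConnectionLogDemo {
        private static final Logger logger = LoggerFactory.getLogger(ConnectionLogDemo.class);

        public static void main(String[] args) {
            String product = "MySQL";
            String version = "5.7";
            // Each {} is filled from the argument list in order.
            logger.info("Connected to {} {}", product, version); // "Connected to MySQL 5.7"
            // Swapping the arguments swaps the output; there is no {1} {0} equivalent.
            logger.info("Connected to {} {}", version, product); // "Connected to 5.7 MySQL"
        }
    }
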
diff --git a/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java b/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java
index 3c3c7ff..2273dbe 100644
--- a/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java
+++ b/core-storage/src/main/java/org/apache/kylin/storage/gtrecord/GTCubeStorageQueryBase.java
@@ -305,7 +305,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
private long getQueryFilterMask(Set<TblColRef> filterColumnD) {
long filterMask = 0;
- logger.info("Filter column set for query: %s", filterColumnD);
+ logger.info("Filter column set for query: {}", filterColumnD);
if (filterColumnD.isEmpty() == false) {
RowKeyColDesc[] allColumns = cubeDesc.getRowkey().getRowKeyColumns();
for (int i = 0; i < allColumns.length; i++) {
@@ -314,7 +314,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
}
}
}
- logger.info("Filter mask is: {0}", filterMask);
+ logger.info("Filter mask is: {}", filterMask);
return filterMask;
}
@@ -430,18 +430,19 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
if (!groupsD.containsAll(cuboid.getColumns().subList(0, size))) {
storageLimitLevel = StorageLimitLevel.LIMIT_ON_RETURN_SIZE;
logger.debug(
- "storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {0} with cuboid columns: {1}", groupsD.toString(), cuboid.getColumns().toString());
+ "storageLimitLevel set to LIMIT_ON_RETURN_SIZE because groupD is not clustered at head, groupsD: {} with cuboid columns: {}",
+ groupsD, cuboid.getColumns());
}
if (!dynGroups.isEmpty()) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
- logger.debug("Storage limit push down is impossible because the query has dynamic groupby {0}", dynGroups);
+ logger.debug("Storage limit push down is impossible because the query has dynamic groupby {}", dynGroups);
}
// derived aggregation is bad, unless expanded columns are already in group by
if (!groups.containsAll(derivedPostAggregation)) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
- logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {0}",
+ logger.debug("storageLimitLevel set to NO_LIMIT because derived column require post aggregation: {}",
derivedPostAggregation);
}
@@ -452,7 +453,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
if (!loosenedColumnD.isEmpty()) { // KYLIN-2173
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
- logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {0}", loosenedColumnD);
+ logger.debug("storageLimitLevel set to NO_LIMIT because filter is loosened: {}", loosenedColumnD);
}
if (context.hasSort()) {
@@ -464,7 +465,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
for (FunctionDesc functionDesc : functionDescs) {
if (functionDesc.isDimensionAsMetric()) {
storageLimitLevel = StorageLimitLevel.NO_LIMIT;
- logger.debug("storageLimitLevel set to NO_LIMIT because {0} isDimensionAsMetric ", functionDesc);
+ logger.debug("storageLimitLevel set to NO_LIMIT because {} isDimensionAsMetric ", functionDesc);
}
}
@@ -483,8 +484,8 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
}
if (!shardByInGroups.isEmpty()) {
enabled = false;
- logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {0}",
- shardByInGroups);
+ logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
+ shardByInGroups);
}
if (!context.isNeedStorageAggregation()) {
@@ -531,7 +532,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
return null;
// OK, push down
- logger.info("Push down having filter {0}", havingFilter);
+ logger.info("Push down having filter {}", havingFilter);
// convert columns in the filter
Set<TblColRef> aggrOutCols = new HashSet<>();
@@ -563,20 +564,21 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
}
if (cuboid.requirePostAggregation()) {
- logger.info("exactAggregation is false because cuboid {0}=>{1}", cuboid.getInputID(), cuboid.getId());
+ logger.info("exactAggregation is false because cuboid {}=>{}", cuboid.getInputID(), cuboid.getId());
return false;
}
// derived aggregation is bad, unless expanded columns are already in group by
if (!groups.containsAll(derivedPostAggregation)) {
- logger.info("exactAggregation is false because derived column require post aggregation: {0}",
+ logger.info("exactAggregation is false because derived column require post aggregation: {}",
derivedPostAggregation);
return false;
}
// other columns (from filter) is bad, unless they are ensured to have single value
if (!singleValuesD.containsAll(othersD)) {
- logger.info("exactAggregation is false because some column not on group by: {0} (single value column: {1})", othersD, singleValuesD);
+ logger.info("exactAggregation is false because some column not on group by: {} (single value column: {})",
+ othersD, singleValuesD);
return false;
}
@@ -599,7 +601,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
if (partDesc.isPartitioned()) {
TblColRef col = partDesc.getPartitionDateColumnRef();
if (!groups.contains(col) && !singleValuesD.contains(col)) {
- logger.info("exactAggregation is false because cube is partitioned and %s is not on group by", col);
+ logger.info("exactAggregation is false because cube is partitioned and {} is not on group by", col);
return false;
}
}
@@ -610,7 +612,7 @@ public abstract class GTCubeStorageQueryBase implements IStorageQuery {
return false;
}
- logger.info("exactAggregation is true, cuboid id is {0}", String.valueOf(cuboid.getId()));
+ logger.info("exactAggregation is true, cuboid id is {}", cuboid.getId());
return true;
}
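The last hunk above also drops the String.valueOf(...) wrapper around the argument. SLF4J stringifies arguments only when the message is actually emitted, i.e. after the level check inside the binding, so the explicit conversion did eager work even when INFO logging was disabled. A sketch of the before/after (LazyArgsDemo is hypothetical, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical demo class, not part of the commit.
    public class LazyArgsDemo {
        private static final Logger logger = LoggerFactory.getLogger(LazyArgsDemo.class);

        public static void main(String[] args) {
            long cuboidId = 262143L;
            // Before: converts to String unconditionally, then hands it to SLF4J.
            logger.info("exactAggregation is true, cuboid id is {}", String.valueOf(cuboidId));
            // After: the long is auto-boxed and converted only if INFO is enabled.
            logger.info("exactAggregation is true, cuboid id is {}", cuboidId);
        }
    }
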
diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java
index 354dcae..23a865d 100644
--- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java
+++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/CreateHTableJob.java
@@ -107,7 +107,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
for (Long cuboid : buildingCuboids) {
Double cuboidSize = cuboidSizeMap.get(cuboid);
if (cuboidSize == null) {
- logger.warn("{0} cuboid's size is null will replace by 0", cuboid);
+ logger.warn("{} cuboid's size is null will replace by 0", cuboid);
cuboidSize = 0.0;
}
optimizedCuboidSizeMap.put(cuboid, cuboidSize);
@@ -135,7 +135,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
HTable table = new HTable(hbaseConf, hbaseTableName);
HFileOutputFormat2.configureIncrementalLoadMap(job, table);
- logger.info("Saving HBase configuration to {0}", hbaseConfPath);
+ logger.info("Saving HBase configuration to {}", hbaseConfPath);
FileSystem fs = HadoopUtil.getWorkingFileSystem();
FSDataOutputStream out = null;
try {
@@ -164,7 +164,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
final CubeDesc cubeDesc = cubeSegment.getCubeDesc();
float cut = cubeDesc.getConfig().getKylinHBaseRegionCut();
- logger.info("Cut for HBase region is {0} GB", String.valueOf(cut));
+ logger.info("Cut for HBase region is {} GB", cut);
double totalSizeInM = 0;
for (Double cuboidSize : cubeSizeMap.values()) {
@@ -187,22 +187,21 @@ public class CreateHTableJob extends AbstractHadoopJob {
}
if (nRegion > Short.MAX_VALUE) {
- logger.info("Too many regions! reduce to {0}" + String.valueOf(Short.MAX_VALUE));
+ logger.info("Too many regions! reduce to {}", Short.MAX_VALUE);
nRegion = Short.MAX_VALUE;
}
if (nRegion != original) {
- logger.info(
- "Region count is adjusted from {0} to {1} to help random sharding", String.valueOf(original), String.valueOf(nRegion));
+ logger.info("Region count is adjusted from {} to {} to help random sharding", original, nRegion);
}
}
int mbPerRegion = (int) (totalSizeInM / nRegion);
mbPerRegion = Math.max(1, mbPerRegion);
- logger.info("Total size {0} M (estimated)", String.valueOf(totalSizeInM));
- logger.info("Expecting {0} regions.", String.valueOf(nRegion));
- logger.info("Expecting {0} MB per region.", String.valueOf(mbPerRegion));
+ logger.info("Total size {} M (estimated)", totalSizeInM);
+ logger.info("Expecting {} regions.", nRegion);
+ logger.info("Expecting {} MB per region.", mbPerRegion);
if (cubeSegment.isEnableSharding()) {
//each cuboid will be split into different number of shards
@@ -244,8 +243,8 @@ public class CreateHTableJob extends AbstractHadoopJob {
}
for (int i = 0; i < nRegion; ++i) {
- logger.debug("Region {0}'s estimated size is {1} MB, accounting for {2} percent",
- String.valueOf(i), String.valueOf(regionSizes[i]), String.valueOf(100.0 * regionSizes[i] / totalSizeInM));
+ logger.debug("Region {}'s estimated size is {} MB, accounting for {} percent", i, regionSizes[i],
+ 100.0 * regionSizes[i] / totalSizeInM);
}
CuboidShardUtil.saveCuboidShards(cubeSegment, cuboidShards, nRegion);
@@ -283,7 +282,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
}
int compactionThreshold = Integer.parseInt(hbaseConf.get("hbase.hstore.compactionThreshold", "3"));
- logger.info("hbase.hstore.compactionThreshold is {0}", String.valueOf(compactionThreshold));
+ logger.info("hbase.hstore.compactionThreshold is {}", compactionThreshold);
if (hfileSizeMB > 0.0f && hfileSizeMB * compactionThreshold < mbPerRegion) {
hfileSizeMB = ((float) mbPerRegion) / compactionThreshold;
}
@@ -291,7 +290,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
if (hfileSizeMB <= 0f) {
hfileSizeMB = mbPerRegion;
}
- logger.info("hfileSizeMB {0}", String.valueOf(hfileSizeMB));
+ logger.info("hfileSizeMB {}", hfileSizeMB);
final Path hfilePartitionFile = new Path(outputFolder, "part-r-00000_hfile");
short regionCount = (short) innerRegionSplits.size();
@@ -314,7 +313,7 @@ public class CreateHTableJob extends AbstractHadoopJob {
for (Long cuboid : allCuboids) {
if (accumulatedSize >= hfileSizeMB) {
- logger.debug("Region {0}'s hfile {1} size is {2} mb", String.valueOf(i), String.valueOf(j), String.valueOf(accumulatedSize));
+ logger.debug("Region {}'s hfile {} size is {} mb", i, j, accumulatedSize);
byte[] split = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN];
BytesUtil.writeUnsigned(i, split, 0, RowConstants.ROWKEY_SHARDID_LEN);
System.arraycopy(Bytes.toBytes(cuboid), 0, split, RowConstants.ROWKEY_SHARDID_LEN,
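The "Too many regions" hunk above fixes an outright bug, not just a stale placeholder: the old line concatenated the value onto the format string, so the {0} was never substituted and the number was glued to it. A sketch of the old and new output (ConcatBugDemo is hypothetical, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical demo class, not part of the commit.
    public class ConcatBugDemo {
        private static final Logger logger = LoggerFactory.getLogger(ConcatBugDemo.class);

        public static void main(String[] args) {
            // Before: logs "Too many regions! reduce to {0}32767".
            logger.info("Too many regions! reduce to {0}" + String.valueOf(Short.MAX_VALUE));
            // After: logs "Too many regions! reduce to 32767".
            logger.info("Too many regions! reduce to {}", Short.MAX_VALUE);
        }
    }
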
diff --git a/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java b/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java
index 8443d03..40971c1 100644
--- a/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java
+++ b/tool/src/main/java/org/apache/kylin/tool/CubeMetaIngester.java
@@ -170,13 +170,13 @@ public class CubeMetaIngester extends AbstractApplication {
TableDesc existing = metadataManager.getTableDesc(tableDesc.getIdentity(), targetProjectName);
if (existing != null && !existing.equals(tableDesc)) {
logger.info("Table {} already has a different version in target metadata store", tableDesc.getIdentity());
- logger.info("Existing version: " + existing);
- logger.info("New version: " + tableDesc);
+ logger.info("Existing version: {}", existing);
+ logger.info("New version: {}", tableDesc);
if (!forceIngest && !overwriteTables) {
throw new IllegalStateException("table already exists with a different version: " + tableDesc.getIdentity() + ". Consider adding -overwriteTables option to force overwriting (with caution)");
} else {
- logger.warn("Overwriting the old table desc: " + tableDesc.getIdentity());
+ logger.warn("Overwriting the old table desc: {}", tableDesc.getIdentity());
}
}
requiredResources.add(tableDesc.getResourcePath());
@@ -207,7 +207,7 @@ public class CubeMetaIngester extends AbstractApplication {
if (!forceIngest) {
throw new IllegalStateException("Already exist a " + type + " called " + name);
} else {
- logger.warn("Overwriting the old {0} desc: {1}", type, name);
+ logger.warn("Overwriting the old {} desc: {}", type, name);
}
}
}
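The CubeMetaIngester changes above replace plain string concatenation with placeholders. With concatenation, existing.toString() runs and the full message String is built before the logger is even called, whether or not INFO is enabled; with a placeholder, SLF4J defers that work until the message is actually written. A sketch of the difference (DeferredToStringDemo is hypothetical, not part of the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical demo class, not part of the commit.
    public class DeferredToStringDemo {
        private static final Logger logger = LoggerFactory.getLogger(DeferredToStringDemo.class);

        public static void main(String[] args) {
            Object existing = new Object();
            // Eager: always calls toString() and builds the message String.
            logger.info("Existing version: " + existing);
            // Lazy: toString() runs only if the INFO message is actually emitted.
            logger.info("Existing version: {}", existing);
        }
    }
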