You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kylin.apache.org by sh...@apache.org on 2018/10/23 02:59:55 UTC
[kylin] branch 2.5.x updated: KYLIN-3604 Can't build cube with
spark in HBase standalone mode
This is an automated email from the ASF dual-hosted git repository.
shaofengshi pushed a commit to branch 2.5.x
in repository https://gitbox.apache.org/repos/asf/kylin.git
The following commit(s) were added to refs/heads/2.5.x by this push:
new b81b8f7 KYLIN-3604 Can't build cube with spark in HBase standalone mode
b81b8f7 is described below
commit b81b8f72c1d5fd14f19ebe79d5d9abd01b507239
Author: Colin Ma <co...@apache.org>
AuthorDate: Tue Oct 23 10:38:29 2018 +0800
KYLIN-3604 Can't build cube with spark in HBase standalone mode
KYLIN-3604 Can't build cube with spark in HBase standalone mode
---
.../src/main/java/org/apache/kylin/common/KylinConfigBase.java | 2 +-
.../java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java | 8 +++++---
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java b/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
index edd2fff..b31f11c 100644
--- a/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
+++ b/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
@@ -1453,7 +1453,7 @@ abstract public class KylinConfigBase implements Serializable {
}
public boolean isQueryPreparedStatementCacheEnable() {
- return Boolean.parseBoolean(this.getOptional("kylin.query.statement-cache-enabled", TRUE));
+ return Boolean.parseBoolean(this.getOptional("kylin.query.statement-cache-enabled", "true"));
}
public int getDimCountDistinctMaxCardinality() {
diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
index 539f03b..00d2ef8 100644
--- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
+++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/steps/SparkCubeHFile.java
@@ -180,7 +180,9 @@ public class SparkCubeHFile extends AbstractApplication implements Serializable
//HBase conf
logger.info("Loading HBase configuration from:" + hbaseConfFile);
- FSDataInputStream confInput = fs.open(new Path(hbaseConfFile));
+ final Path hbaseConfFilePath = new Path(hbaseConfFile);
+ final FileSystem hbaseClusterFs = hbaseConfFilePath.getFileSystem(sc.hadoopConfiguration());
+ FSDataInputStream confInput = hbaseClusterFs.open(new Path(hbaseConfFile));
Configuration hbaseJobConf = new Configuration();
hbaseJobConf.addResource(confInput);
@@ -189,7 +191,7 @@ public class SparkCubeHFile extends AbstractApplication implements Serializable
FileOutputFormat.setOutputPath(job, new Path(outputPath));
- JavaPairRDD<Text, Text> inputRDDs = SparkUtil.parseInputPath(inputPath, fs, sc, Text.class, Text.class);
+ JavaPairRDD<Text, Text> inputRDDs = SparkUtil.parseInputPath(inputPath, hbaseClusterFs, sc, Text.class, Text.class);
final JavaPairRDD<RowKeyWritable, KeyValue> hfilerdd;
if (quickPath) {
hfilerdd = inputRDDs.mapToPair(new PairFunction<Tuple2<Text, Text>, RowKeyWritable, KeyValue>() {
@@ -241,7 +243,7 @@ public class SparkCubeHFile extends AbstractApplication implements Serializable
// save counter to hdfs
HadoopUtil.writeToSequenceFile(sc.hadoopConfiguration(), counterPath, counterMap);
- //HadoopUtil.deleteHDFSMeta(metaUrl);
+ HadoopUtil.deleteHDFSMeta(metaUrl);
}
static class HFilePartitioner extends Partitioner {