You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@doris.apache.org by li...@apache.org on 2022/12/22 06:33:01 UTC
[doris] branch master updated: [Improvement](external table) support hive external table which stores data on tencent chdfs (#15125)
This is an automated email from the ASF dual-hosted git repository.
lide pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 0fa4c78e84 [Improvement](external table) support hive external table which stores data on tencent chdfs (#15125)
0fa4c78e84 is described below
commit 0fa4c78e84c6b4f20d2db8c6f0c2787100b1ad7b
Author: Yulei-Yang <yu...@gmail.com>
AuthorDate: Thu Dec 22 14:32:55 2022 +0800
[Improvement](external table) support hive external table which stores data on tencent chdfs (#15125)
---
.../src/main/java/org/apache/doris/analysis/BrokerDesc.java | 1 +
.../src/main/java/org/apache/doris/backup/BlobStorage.java | 11 +++++++++--
.../src/main/java/org/apache/doris/backup/HdfsStorage.java | 2 +-
.../org/apache/doris/catalog/HiveMetaStoreClientHelper.java | 9 +++++----
.../src/main/java/org/apache/doris/common/FeConstants.java | 1 +
.../main/java/org/apache/doris/planner/BrokerScanNode.java | 3 ++-
.../src/main/java/org/apache/doris/planner/HiveScanNode.java | 3 +++
7 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
index d0437f13c1..bac9e80b08 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
@@ -117,6 +117,7 @@ public class BrokerDesc extends StorageDesc implements Writable {
case STREAM:
return TFileType.FILE_STREAM;
case BROKER:
+ case OFS:
default:
return TFileType.FILE_BROKER;
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BlobStorage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BlobStorage.java
index eb8f0402dc..e4a3362490 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BlobStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BlobStorage.java
@@ -50,8 +50,15 @@ public abstract class BlobStorage implements Writable {
public static BlobStorage create(String name, StorageBackend.StorageType type, Map<String, String> properties) {
if (type == StorageBackend.StorageType.S3) {
return new S3Storage(properties);
- } else if (type == StorageBackend.StorageType.HDFS) {
- return new HdfsStorage(properties);
+ } else if (type == StorageBackend.StorageType.HDFS || type == StorageBackend.StorageType.OFS) {
+ BlobStorage storage = new HdfsStorage(properties);
+ // as for ofs files, use hdfs storage, but its type should be ofs
+ if (type == StorageBackend.StorageType.OFS) {
+ storage.setType(type);
+ storage.setName(type.name());
+ }
+
+ return storage;
} else if (type == StorageBackend.StorageType.BROKER) {
return new BrokerStorage(name, properties);
} else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/HdfsStorage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/HdfsStorage.java
index e245ad6377..be858c6648 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/HdfsStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/HdfsStorage.java
@@ -568,6 +568,6 @@ public class HdfsStorage extends BlobStorage {
@Override
public StorageBackend.StorageType getStorageType() {
- return StorageBackend.StorageType.HDFS;
+ return this.getType();
}
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
index ec097c3593..e43cecb0c7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
@@ -32,7 +32,6 @@ import org.apache.doris.analysis.SlotRef;
import org.apache.doris.analysis.StorageBackend;
import org.apache.doris.analysis.StringLiteral;
import org.apache.doris.backup.BlobStorage;
-import org.apache.doris.backup.S3Storage;
import org.apache.doris.common.Config;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.UserException;
@@ -208,7 +207,8 @@ public class HiveMetaStoreClientHelper {
private static String getAllFileStatus(List<TBrokerFileStatus> fileStatuses,
List<RemoteIterator<LocatedFileStatus>> remoteIterators, BlobStorage storage) throws UserException {
- boolean onS3 = storage instanceof S3Storage;
+ boolean needFullPath = storage.getStorageType() == StorageBackend.StorageType.S3
+ || storage.getStorageType() == StorageBackend.StorageType.OFS;
String hdfsUrl = "";
Queue<RemoteIterator<LocatedFileStatus>> queue = Queues.newArrayDeque(remoteIterators);
while (queue.peek() != null) {
@@ -229,7 +229,7 @@ public class HiveMetaStoreClientHelper {
// eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse
// + /dae.db/customer/state=CA/city=SanJose/000000_0
String path = fileStatus.getPath().toUri().getPath();
- if (onS3) {
+ if (needFullPath) {
// Backend needs the full s3 path (with s3://bucket at the beginning) to read the data on s3.
// path = "s3://bucket/path/to/partition/file_name"
// eg: s3://hive-s3-test/region/region.tbl
@@ -759,7 +759,8 @@ public class HiveMetaStoreClientHelper {
if (remoteTable.getPartitionKeys().size() > 0) {
output.append("PARTITIONED BY (\n")
.append(remoteTable.getPartitionKeys().stream().map(
- partition -> String.format(" `%s` `%s`", partition.getName(), partition.getType()))
+ partition ->
+ String.format(" `%s` `%s`", partition.getName(), partition.getType()))
.collect(Collectors.joining(",\n")))
.append(")\n");
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/FeConstants.java b/fe/fe-core/src/main/java/org/apache/doris/common/FeConstants.java
index cfa802ea34..2bae2c7a49 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/FeConstants.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/FeConstants.java
@@ -76,6 +76,7 @@ public class FeConstants {
public static String FS_PREFIX_BOS = "bos";
public static String FS_PREFIX_COS = "cos";
public static String FS_PREFIX_OBS = "obs";
+ public static String FS_PREFIX_OFS = "ofs";
public static String FS_PREFIX_HDFS = "hdfs";
public static String FS_PREFIX_FILE = "file";
public static final String INTERNAL_DB_NAME = "__internal_schema";
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
index 975479e89b..41b0d1f770 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/BrokerScanNode.java
@@ -317,7 +317,8 @@ public class BrokerScanNode extends LoadScanNode {
// Generate on broker scan range
TBrokerScanRange brokerScanRange = new TBrokerScanRange();
brokerScanRange.setParams(params);
- if (brokerDesc.getStorageType() == StorageBackend.StorageType.BROKER) {
+ if (brokerDesc.getStorageType() == StorageBackend.StorageType.BROKER
+ || brokerDesc.getStorageType() == StorageBackend.StorageType.OFS) {
FsBroker broker = null;
try {
broker = Env.getCurrentEnv().getBrokerMgr()
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
index 13c4bae3d6..121e2df54d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
@@ -25,6 +25,7 @@ import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.catalog.HMSResource;
import org.apache.doris.catalog.HiveMetaStoreClientHelper;
import org.apache.doris.catalog.HiveTable;
+import org.apache.doris.common.FeConstants;
import org.apache.doris.common.UserException;
import org.apache.doris.common.util.Util;
import org.apache.doris.load.BrokerFileGroup;
@@ -132,6 +133,8 @@ public class HiveScanNode extends BrokerScanNode {
this.storageType = StorageBackend.StorageType.S3;
} else if (storagePrefix.equalsIgnoreCase("hdfs")) {
this.storageType = StorageBackend.StorageType.HDFS;
+ } else if (storagePrefix.equalsIgnoreCase(FeConstants.FS_PREFIX_OFS)) {
+ this.storageType = StorageBackend.StorageType.OFS;
} else {
throw new UserException("Not supported storage type: " + storagePrefix);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@doris.apache.org
For additional commands, e-mail: commits-help@doris.apache.org