You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by we...@apache.org on 2018/12/21 05:01:36 UTC
[spark] branch master updated: [MINOR][SQL] Locality does not need
to be implemented
This is an automated email from the ASF dual-hosted git repository.
wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 98ecda3 [MINOR][SQL] Locality does not need to be implemented
98ecda3 is described below
commit 98ecda3e8ef9db5e21b5b9605df09d1653094b9c
Author: liuxian <li...@zte.com.cn>
AuthorDate: Fri Dec 21 13:01:14 2018 +0800
[MINOR][SQL] Locality does not need to be implemented
## What changes were proposed in this pull request?
`HadoopFileWholeTextReader` and `HadoopFileLinesReader` will eventually be called from `FileSourceScanExec`.
In fact, locality has already been implemented in `FileScanRDD`, so even if we implemented it in `HadoopFileWholeTextReader` and `HadoopFileLinesReader`, it would be useless.
So I think these `TODO` comments can be removed.
## How was this patch tested?
N/A
Closes #23339 from 10110346/noneededtodo.
Authored-by: liuxian <li...@zte.com.cn>
Signed-off-by: Wenchen Fan <we...@databricks.com>
---
.../apache/spark/sql/execution/datasources/HadoopFileLinesReader.scala | 2 +-
.../spark/sql/execution/datasources/HadoopFileWholeTextReader.scala | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReader.scala
index 00a78f7..57082b4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileLinesReader.scala
@@ -51,7 +51,7 @@ class HadoopFileLinesReader(
new Path(new URI(file.filePath)),
file.start,
file.length,
- // TODO: Implement Locality
+ // The locality is decided by `getPreferredLocations` in `FileScanRDD`.
Array.empty)
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileWholeTextReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileWholeTextReader.scala
index c61a89e..f5724f7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileWholeTextReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFileWholeTextReader.scala
@@ -40,7 +40,7 @@ class HadoopFileWholeTextReader(file: PartitionedFile, conf: Configuration)
Array(new Path(new URI(file.filePath))),
Array(file.start),
Array(file.length),
- // TODO: Implement Locality
+ // The locality is decided by `getPreferredLocations` in `FileScanRDD`.
Array.empty[String])
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org