Posted to commits@kyuubi.apache.org by ch...@apache.org on 2023/02/21 09:25:03 UTC

[kyuubi] branch master updated: [KYUUBI #4391] Improve code for hive-connector FileWriterFactory

This is an automated email from the ASF dual-hosted git repository.

chengpan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git


The following commit(s) were added to refs/heads/master by this push:
     new 3b73e1d64 [KYUUBI #4391] Improve code for hive-connector FileWriterFactory
3b73e1d64 is described below

commit 3b73e1d64af8cb6351936d277ff57d98faa30c41
Author: Yikf <yi...@apache.org>
AuthorDate: Tue Feb 21 17:24:53 2023 +0800

    [KYUUBI #4391] Improve code for hive-connector FileWriterFactory
    
    ### _Why are the changes needed?_
    
    This PR improves the code of the hive-connector FileWriterFactory; the main goal is to reduce duplicated copies of Spark code by reusing Spark's job-ID helper instead of a local re-implementation.
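
    For context, the duplication being removed is a private re-implementation of Spark's `createJobID` helper (dropped in the diff below); it only validated the job number and wrapped Hadoop's `JobID` constructor. A minimal, self-contained sketch of the Hadoop ID hierarchy the factory builds from that job ID (illustrative values, plain Hadoop API):

    ```scala
    import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}

    // One JobID is shared by the whole write job; each task derives a
    // TaskID from its partition number and a TaskAttemptID from that.
    val jobId = new JobID("20230221172453", 0)
    val taskId = new TaskID(jobId, TaskType.MAP, 3) // partitionId = 3
    val taskAttemptId = new TaskAttemptID(taskId, 0) // first attempt
    println(taskAttemptId) // attempt_20230221172453_0000_m_000003_0
    ```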
    
    ### _How was this patch tested?_
    - [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
    
    - [ ] Add screenshots for manual tests if appropriate
    
    - [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
    
    Closes #4391 from Yikf/improve-code.
    
    Closes #4391
    
    7991f145 [Yikf] improve code for hive-connector FileWriterFactory
    
    Authored-by: Yikf <yi...@apache.org>
    Signed-off-by: Cheng Pan <ch...@apache.org>
---
 .../spark/connector/hive/write/FileWriterFactory.scala | 18 +-----------------
 1 file changed, 1 insertion(+), 17 deletions(-)

diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
index c8e8f9b69..6ebb55f14 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/write/FileWriterFactory.scala
@@ -19,7 +19,6 @@ package org.apache.kyuubi.spark.connector.hive.write
 
 import java.util.Date
 
-import org.apache.hadoop.mapred.JobID
 import org.apache.hadoop.mapreduce.{TaskAttemptID, TaskID, TaskType}
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
 import org.apache.spark.internal.io.FileCommitProtocol
@@ -35,7 +34,7 @@ case class FileWriterFactory(
     description: WriteJobDescription,
     committer: FileCommitProtocol) extends DataWriterFactory {
 
-  private val jobTrackerId = sparkHadoopWriterUtils.createJobTrackerID(new Date)
+  @transient private lazy val jobId = sparkHadoopWriterUtils.createJobID(new Date, 0)
 
   override def createWriter(partitionId: Int, realTaskId: Long): DataWriter[InternalRow] = {
     val taskAttemptContext = createTaskAttemptContext(partitionId)
@@ -48,7 +47,6 @@ case class FileWriterFactory(
   }
 
   private def createTaskAttemptContext(partitionId: Int): TaskAttemptContextImpl = {
-    val jobId = createJobID(jobTrackerId, 0)
     val taskId = new TaskID(jobId, TaskType.MAP, partitionId)
     val taskAttemptId = new TaskAttemptID(taskId, 0)
     // Set up the configuration object
@@ -61,18 +59,4 @@ case class FileWriterFactory(
 
     new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
   }
-
-  /**
-   * Create a job ID.
-   *
-   * @param jobTrackerID unique job track id
-   * @param id job number
-   * @return a job ID
-   */
-  def createJobID(jobTrackerID: String, id: Int): JobID = {
-    if (id < 0) {
-      throw new IllegalArgumentException("Job number is negative")
-    }
-    new JobID(jobTrackerID, id)
-  }
 }
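
The notable change in the patch above is replacing the eagerly-created `jobTrackerId` string with a `@transient private lazy val jobId`. A small self-contained sketch of how that pattern behaves (hypothetical names; plain Java serialization stands in for Spark shipping the factory to executors):

```scala
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}

// Hypothetical stand-in for FileWriterFactory, reduced to the pattern:
// the field is skipped during serialization (@transient) and recomputed
// on first access after deserialization (lazy), e.g. on an executor.
case class Factory(name: String) {
  @transient private lazy val stamp: Long = {
    println(s"computing stamp for $name")
    System.nanoTime()
  }
  def get: Long = stamp
}

object TransientLazyDemo extends App {
  val f = Factory("driver")
  f.get // computed locally

  val buf = new ByteArrayOutputStream()
  new ObjectOutputStream(buf).writeObject(f)
  val copy = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray))
    .readObject().asInstanceOf[Factory]
  copy.get // recomputed after the round trip, not restored from bytes
}
```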