Posted to commits@spark.apache.org by do...@apache.org on 2020/07/18 22:09:24 UTC

[spark] branch master updated: [SPARK-29292][YARN][K8S][MESOS] Fix Scala 2.13 compilation for remaining modules

This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new ee62482  [SPARK-29292][YARN][K8S][MESOS] Fix Scala 2.13 compilation for remaining modules
ee62482 is described below

commit ee624821a903a263c844ed849d6833df8e9ad43e
Author: Sean Owen <sr...@gmail.com>
AuthorDate: Sat Jul 18 15:08:00 2020 -0700

    [SPARK-29292][YARN][K8S][MESOS] Fix Scala 2.13 compilation for remaining modules
    
    ### What changes were proposed in this pull request?
    
    See the related PRs, such as https://github.com/apache/spark/pull/28971
    This completes the Scala 2.13 compilation fixes for all remaining modules except `repl`, which is handled as a separate task.
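    
    For context: the root cause in most of these modules is that Scala 2.13
    changes the default `Seq`/`IndexedSeq` aliases from `scala.collection.Seq`
    to `scala.collection.immutable.Seq`, so mutable collections (including the
    wrappers produced by `asScala`) no longer satisfy `Seq`-typed APIs. A
    minimal sketch (illustrative, not code from this patch):
    
        import scala.collection.mutable
    
        val buf = mutable.ArrayBuffer(1, 2, 3)
        // 2.12: Seq is scala.collection.Seq, so a Buffer is already a Seq.
        // 2.13: Seq is scala.collection.immutable.Seq, so this fails to compile:
        //   val s: Seq[Int] = buf
        val s: Seq[Int] = buf.toSeq   // explicit copy compiles on both versions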
    
    ### Why are the changes needed?
    
    Eventually, we need to support a Scala 2.13 build, perhaps in Spark 3.1.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    Existing tests. (Scala 2.13 itself was not tested; the goal is to get these modules to compile under 2.13 without breaking 2.12.)
    
    Closes #29147 from srowen/SPARK-29292.4.
    
    Authored-by: Sean Owen <sr...@gmail.com>
    Signed-off-by: Dongjoon Hyun <do...@apache.org>
---
 .../org/apache/spark/streaming/kinesis/KinesisTestUtils.scala  |  6 +++---
 .../spark/streaming/kinesis/KPLBasedKinesisTestUtils.scala     |  2 +-
 .../spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala   |  4 ++--
 .../cluster/k8s/ExecutorPodsPollingSnapshotSource.scala        |  2 +-
 .../scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreImpl.scala |  2 +-
 .../cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala  |  2 +-
 .../apache/spark/deploy/k8s/integrationtest/ProcessUtils.scala |  2 +-
 .../spark/scheduler/cluster/mesos/MesosClusterScheduler.scala  |  8 ++++----
 .../cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala     |  4 ++--
 .../spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala    |  2 +-
 .../scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala   |  2 +-
 .../cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala  |  6 ++++--
 .../src/main/scala/org/apache/spark/deploy/yarn/Client.scala   |  4 ++--
 .../spark/deploy/yarn/ClientDistributedCacheManager.scala      | 10 +++++-----
 .../scala/org/apache/spark/deploy/yarn/YarnAllocator.scala     |  8 ++++----
 .../spark/executor/YarnCoarseGrainedExecutorBackend.scala      |  4 ++--
 .../apache/spark/sql/hive/thriftserver/SparkOperation.scala    |  2 +-
 17 files changed, 36 insertions(+), 34 deletions(-)

diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisTestUtils.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisTestUtils.scala
index 2ac83c8..4dacc03 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisTestUtils.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisTestUtils.scala
@@ -97,7 +97,7 @@ private[kinesis] class KinesisTestUtils(streamShardCount: Int = 2) extends Loggi
   }
 
   def getShards(): Seq[Shard] = {
-    kinesisClient.describeStream(_streamName).getStreamDescription.getShards.asScala
+    kinesisClient.describeStream(_streamName).getStreamDescription.getShards.asScala.toSeq
   }
 
   def splitShard(shardId: String): Unit = {
@@ -137,7 +137,7 @@ private[kinesis] class KinesisTestUtils(streamShardCount: Int = 2) extends Loggi
    * Expose a Python friendly API.
    */
   def pushData(testData: java.util.List[Int]): Unit = {
-    pushData(testData.asScala, aggregate = false)
+    pushData(testData.asScala.toSeq, aggregate = false)
   }
 
   def deleteStream(): Unit = {
@@ -289,6 +289,6 @@ private[kinesis] class SimpleDataGenerator(
       sentSeqNumbers += ((num, seqNumber))
     }
 
-    shardIdToSeqNumbers.toMap
+    shardIdToSeqNumbers.mapValues(_.toSeq).toMap
   }
 }
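
The `.toSeq` copies above are needed because `asScala` over the AWS SDK's
`java.util.List` returns a `mutable.Buffer`, which is no longer a `Seq` on
2.13. A minimal sketch with a stand-in list (the cross-version
`JavaConverters` import is deprecated on 2.13 but still compiles):

    import scala.collection.JavaConverters._

    val javaShards: java.util.List[String] =
      java.util.Arrays.asList("shardId-0", "shardId-1")

    def getShards(): Seq[String] =
      javaShards.asScala.toSeq   // a Buffer on both versions; .toSeq needed on 2.13
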
diff --git a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KPLBasedKinesisTestUtils.scala b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KPLBasedKinesisTestUtils.scala
index af84498..c76eb7c 100644
--- a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KPLBasedKinesisTestUtils.scala
+++ b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KPLBasedKinesisTestUtils.scala
@@ -72,6 +72,6 @@ private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataG
       Futures.addCallback(future, kinesisCallBack, ThreadUtils.sameThreadExecutorService)
     }
     producer.flushSync()
-    shardIdToSeqNumbers.toMap
+    shardIdToSeqNumbers.mapValues(_.toSeq).toMap
   }
 }
diff --git a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
index 2c7b9c5..12d9500 100644
--- a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
+++ b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
@@ -47,8 +47,8 @@ abstract class KinesisBackedBlockRDDTests(aggregateTestData: Boolean)
       require(shardIdToDataAndSeqNumbers.size > 1, "Need data to be sent to multiple shards")
 
       shardIds = shardIdToDataAndSeqNumbers.keySet.toSeq
-      shardIdToData = shardIdToDataAndSeqNumbers.mapValues { _.map { _._1 }}
-      shardIdToSeqNumbers = shardIdToDataAndSeqNumbers.mapValues { _.map { _._2 }}
+      shardIdToData = shardIdToDataAndSeqNumbers.mapValues(_.map(_._1)).toMap
+      shardIdToSeqNumbers = shardIdToDataAndSeqNumbers.mapValues(_.map(_._2)).toMap
       shardIdToRange = shardIdToSeqNumbers.map { case (shardId, seqNumbers) =>
         val seqNumRange = SequenceNumberRange(
           testUtils.streamName, shardId, seqNumbers.head, seqNumbers.last, seqNumbers.size)
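
`mapValues` is the other recurring pattern in the Kinesis hunks: on 2.12 it
returned a (lazily evaluated) `Map`, while on 2.13 it returns a `MapView`,
which is not a `Map` at all, so `Map`-typed fields need an explicit `.toMap`.
A minimal sketch with illustrative data:

    val data = Map("shard-0" -> Seq((1, "a"), (2, "b")))
    // 2.12: mapValues yields a Map, so the assignment compiled as written.
    // 2.13: mapValues yields a MapView -- .toMap materializes it into a Map.
    val seqNumbers: Map[String, Seq[String]] = data.mapValues(_.map(_._2)).toMap
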
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSource.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSource.scala
index fd8f697..da7fe7c 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSource.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSource.scala
@@ -62,7 +62,7 @@ private[spark] class ExecutorPodsPollingSnapshotSource(
         .withoutLabel(SPARK_EXECUTOR_INACTIVE_LABEL, "true")
         .list()
         .getItems
-        .asScala)
+        .asScala.toSeq)
     }
   }
 
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreImpl.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreImpl.scala
index d68dc3e..5c192c6 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreImpl.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreImpl.scala
@@ -131,7 +131,7 @@ private[spark] class ExecutorPodsSnapshotsStoreImpl(subscribersExecutor: Schedul
           try {
             val snapshots = new ArrayList[ExecutorPodsSnapshot]()
             snapshotsBuffer.drainTo(snapshots)
-            onNewSnapshots(snapshots.asScala)
+            onNewSnapshots(snapshots.asScala.toSeq)
           } catch {
             case NonFatal(e) => logWarning("Exception when notifying snapshot subscriber.", e)
           } finally {
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
index 9ac7e02..6dc052a 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
@@ -35,7 +35,7 @@ class DeterministicExecutorPodsSnapshotsStore extends ExecutorPodsSnapshotsStore
   override def stop(): Unit = {}
 
   override def notifySubscribers(): Unit = {
-    subscribers.foreach(_(snapshotsBuffer))
+    subscribers.foreach(_(snapshotsBuffer.toSeq))
     snapshotsBuffer.clear()
   }
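
Beyond the type change, `.toSeq` on a `mutable.Buffer` also copies on 2.13,
which is what makes clearing the buffer immediately afterwards safe; 2.12's
`toSeq` on a Buffer returned the buffer itself. A minimal sketch of the 2.13
behavior:

    import scala.collection.mutable

    val snapshotsBuffer = mutable.Buffer("snapshot-1", "snapshot-2")
    val delivered = snapshotsBuffer.toSeq   // immutable copy on 2.13
    snapshotsBuffer.clear()
    assert(delivered.size == 2)   // holds on 2.13; 2.12's toSeq shared the buffer
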
 
diff --git a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ProcessUtils.scala b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ProcessUtils.scala
index 7d76a22..cce842c 100644
--- a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ProcessUtils.scala
+++ b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ProcessUtils.scala
@@ -47,6 +47,6 @@ object ProcessUtils extends Logging {
     assert(proc.exitValue == 0,
       s"Failed to execute ${fullCommand.mkString(" ")}" +
         s"${if (dumpErrors) "\n" + outputLines.mkString("\n")}")
-    outputLines
+    outputLines.toSeq
   }
 }
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala
index 26939ef..e5c1853 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala
@@ -383,13 +383,13 @@ private[spark] class MesosClusterScheduler(
     taskId.split(s"${RETRY_SEP}").head
   }
 
-  private def adjust[A, B](m: collection.Map[A, B], k: A, default: B)(f: B => B) = {
+  private def adjust[A, B](m: Map[A, B], k: A, default: B)(f: B => B) = {
     m.updated(k, f(m.getOrElse(k, default)))
   }
 
   private def getDriverEnvironment(desc: MesosDriverDescription): Environment = {
     // TODO(mgummelt): Don't do this here.  This should be passed as a --conf
-    val commandEnv = adjust(desc.command.environment, "SPARK_SUBMIT_OPTS", "")(
+    val commandEnv = adjust(desc.command.environment.toMap, "SPARK_SUBMIT_OPTS", "")(
       v => s"$v -D${config.DRIVER_FRAMEWORK_ID.key}=${getDriverFrameworkID(desc)}"
     )
 
@@ -686,14 +686,14 @@ private[spark] class MesosClusterScheduler(
       }
 
       scheduleTasks(
-        copyBuffer(driversToRetry),
+        copyBuffer(driversToRetry).toSeq,
         removeFromPendingRetryDrivers,
         currentOffers,
         tasks)
 
       // Then we walk through the queued drivers and try to schedule them.
       scheduleTasks(
-        copyBuffer(queuedDrivers),
+        copyBuffer(queuedDrivers).toSeq,
         removeFromQueuedDrivers,
         currentOffers,
         tasks)
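
`adjust` needed its parameter narrowed because `scala.collection.Map` no
longer offers `updated` on 2.13 (the transformation methods live on the
immutable hierarchy), so callers holding a generic `collection.Map` convert
with `.toMap` first. A minimal sketch (the `-D` flag below is illustrative):

    def adjust[A, B](m: Map[A, B], k: A, default: B)(f: B => B): Map[A, B] =
      m.updated(k, f(m.getOrElse(k, default)))

    val env: scala.collection.Map[String, String] =
      scala.collection.Map("SPARK_SUBMIT_OPTS" -> "")
    val adjusted = adjust(env.toMap, "SPARK_SUBMIT_OPTS", "")(v => s"$v -Dexample=1")
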
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
index 5e7a29a..32cd502 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
@@ -491,8 +491,8 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
     val tasks = new mutable.HashMap[OfferID, List[MesosTaskInfo]].withDefaultValue(Nil)
 
     // offerID -> resources
-    val remainingResources = mutable.Map(offers.map(offer =>
-      (offer.getId.getValue, offer.getResourcesList)): _*)
+    val remainingResources = mutable.Map[String, JList[Resource]]()
+    remainingResources ++= offers.map(offer => (offer.getId.getValue, offer.getResourcesList))
 
     var launchTasks = true
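
The two-step construction here works around 2.13's stricter varargs rule:
sequence splatting with `: _*` now requires an `immutable.Seq`, so
`mutable.Map(offers.map(...): _*)` stops compiling, while `++=` accepts any
`IterableOnce` on both versions. A minimal sketch with stand-in offer data:

    import scala.collection.mutable

    val pairs: collection.Seq[(String, Int)] = mutable.ArrayBuffer("offer-1" -> 4)
    // 2.13: splatting (: _*) demands an immutable.Seq, so this no longer compiles:
    //   val m = mutable.Map(pairs: _*)
    val m = mutable.Map[String, Int]()
    m ++= pairs   // works on 2.12 and 2.13
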
 
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
index 5784ee3..2be8835 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
@@ -379,7 +379,7 @@ trait MesosSchedulerUtils extends Logging {
           } else {
             v.split(',').toSet
           }
-        )
+        ).toMap
       } catch {
         case NonFatal(e) =>
           throw new IllegalArgumentException(s"Bad constraint string: $constraintsVal", e)
diff --git a/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala b/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala
index bb37bbd..2433c16 100644
--- a/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala
+++ b/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala
@@ -146,7 +146,7 @@ class MesosClusterSchedulerSuite extends SparkFunSuite with LocalSparkContext wi
     val resources = taskInfo.getResourcesList
     assert(scheduler.getResource(resources, "cpus") == 1.5)
     assert(scheduler.getResource(resources, "mem") == 1200)
-    val resourcesSeq: Seq[Resource] = resources.asScala
+    val resourcesSeq: Seq[Resource] = resources.asScala.toSeq
     val cpus = resourcesSeq.filter(_.getName == "cpus").toList
     assert(cpus.size == 2)
     assert(cpus.exists(_.getRole() == "role2"))
diff --git a/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala b/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala
index 36ed848..67ecf32 100644
--- a/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala
+++ b/resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala
@@ -267,7 +267,8 @@ class MesosFineGrainedSchedulerBackendSuite
       properties = new Properties(),
       resources = immutable.Map.empty[String, ResourceInformation],
       ByteBuffer.wrap(new Array[Byte](0)))
-    when(taskScheduler.resourceOffers(expectedWorkerOffers)).thenReturn(Seq(Seq(taskDesc)))
+    when(taskScheduler.resourceOffers(
+      expectedWorkerOffers.toIndexedSeq)).thenReturn(Seq(Seq(taskDesc)))
     when(taskScheduler.CPUS_PER_TASK).thenReturn(2)
 
     val capture = ArgumentCaptor.forClass(classOf[Collection[TaskInfo]])
@@ -379,7 +380,8 @@ class MesosFineGrainedSchedulerBackendSuite
       properties = new Properties(),
       resources = immutable.Map.empty[String, ResourceInformation],
       ByteBuffer.wrap(new Array[Byte](0)))
-    when(taskScheduler.resourceOffers(expectedWorkerOffers)).thenReturn(Seq(Seq(taskDesc)))
+    when(taskScheduler.resourceOffers(
+      expectedWorkerOffers.toIndexedSeq)).thenReturn(Seq(Seq(taskDesc)))
     when(taskScheduler.CPUS_PER_TASK).thenReturn(1)
 
     val capture = ArgumentCaptor.forClass(classOf[Collection[TaskInfo]])
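
`toIndexedSeq` is needed in both stubs above because the default `IndexedSeq`
alias moved to `immutable.IndexedSeq` on 2.13 (matching the `Seq` change),
and the stubbed `resourceOffers` signature is declared against it. A minimal
sketch:

    import scala.collection.mutable

    val expectedWorkerOffers = new mutable.ArrayBuffer[String](1)
    expectedWorkerOffers += "offer-1"
    // 2.12: an ArrayBuffer already conformed to scala.IndexedSeq.
    // 2.13: scala.IndexedSeq is immutable.IndexedSeq, hence the copy:
    val offers: IndexedSeq[String] = expectedWorkerOffers.toIndexedSeq
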
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index 7b12119..1045fb0 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -553,7 +553,7 @@ private[spark] class Client(
           }
 
           // Propagate the local URIs to the containers using the configuration.
-          sparkConf.set(SPARK_JARS, localJars)
+          sparkConf.set(SPARK_JARS, localJars.toSeq)
 
         case None =>
           // No configuration, so fall back to uploading local jar files.
@@ -628,7 +628,7 @@ private[spark] class Client(
       }
     }
     if (cachedSecondaryJarLinks.nonEmpty) {
-      sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
+      sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks.toSeq)
     }
 
     if (isClusterMode && args.primaryPyFile != null) {
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
index e6e0ea3..e02fbd0 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
@@ -91,11 +91,11 @@ private[spark] class ClientDistributedCacheManager() extends Logging {
    * Writes down information about cached files needed in executors to the given configuration.
    */
   def updateConfiguration(conf: SparkConf): Unit = {
-    conf.set(CACHED_FILES, distCacheEntries.map(_.uri.toString))
-    conf.set(CACHED_FILES_SIZES, distCacheEntries.map(_.size))
-    conf.set(CACHED_FILES_TIMESTAMPS, distCacheEntries.map(_.modTime))
-    conf.set(CACHED_FILES_VISIBILITIES, distCacheEntries.map(_.visibility.name()))
-    conf.set(CACHED_FILES_TYPES, distCacheEntries.map(_.resType.name()))
+    conf.set(CACHED_FILES, distCacheEntries.map(_.uri.toString).toSeq)
+    conf.set(CACHED_FILES_SIZES, distCacheEntries.map(_.size).toSeq)
+    conf.set(CACHED_FILES_TIMESTAMPS, distCacheEntries.map(_.modTime).toSeq)
+    conf.set(CACHED_FILES_VISIBILITIES, distCacheEntries.map(_.visibility.name()).toSeq)
+    conf.set(CACHED_FILES_TYPES, distCacheEntries.map(_.resType.name()).toSeq)
   }
 
   /**
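
This hunk and the `Client.scala` one above share a cause: the typed config
setters take `Seq`-valued entries, which on 2.13 means immutable `Seq`, while
`distCacheEntries` and friends are mutable buffers. A minimal sketch with a
stand-in setter and key (the real `ConfigEntry` plumbing is elided here):

    import scala.collection.mutable

    // Stand-in for conf.set(CACHED_FILES, ...), which takes a Seq[String].
    def set(key: String, value: Seq[String]): Unit =
      println(s"$key=${value.mkString(",")}")

    val distCacheEntries = mutable.ListBuffer("hdfs:///a.jar", "hdfs:///b.jar")
    set("example.cached.files", distCacheEntries.toSeq)   // .toSeq needed on 2.13
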
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index cd0e7d5..dc09323 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
@@ -296,7 +296,7 @@ private[yarn] class YarnAllocator(
       val profResource = rpIdToYarnResource.get(id)
       val result = amClient.getMatchingRequests(getContainerPriority(id), location, profResource)
         .asScala.flatMap(_.asScala)
-      allContainerRequests(id) = result
+      allContainerRequests(id) = result.toSeq
     }
     allContainerRequests.toMap
   }
@@ -426,13 +426,13 @@ private[yarn] class YarnAllocator(
           getNumExecutorsStarting,
           allocateResponse.getAvailableResources))
 
-      handleAllocatedContainers(allocatedContainers.asScala)
+      handleAllocatedContainers(allocatedContainers.asScala.toSeq)
     }
 
     val completedContainers = allocateResponse.getCompletedContainersStatuses()
     if (completedContainers.size > 0) {
       logDebug("Completed %d containers".format(completedContainers.size))
-      processCompletedContainers(completedContainers.asScala)
+      processCompletedContainers(completedContainers.asScala.toSeq)
       logDebug("Finished processing %d completed containers. Current running executor count: %d."
         .format(completedContainers.size, getNumExecutorsRunning))
     }
@@ -960,7 +960,7 @@ private[yarn] class YarnAllocator(
       }
     }
 
-    (localityMatched, localityUnMatched, localityFree)
+    (localityMatched.toSeq, localityUnMatched.toSeq, localityFree.toSeq)
   }
 
 }
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/executor/YarnCoarseGrainedExecutorBackend.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/executor/YarnCoarseGrainedExecutorBackend.scala
index 669e39f..ce46ffa 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/executor/YarnCoarseGrainedExecutorBackend.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/executor/YarnCoarseGrainedExecutorBackend.scala
@@ -73,8 +73,8 @@ private[spark] object YarnCoarseGrainedExecutorBackend extends Logging {
     val createFn: (RpcEnv, CoarseGrainedExecutorBackend.Arguments, SparkEnv, ResourceProfile) =>
       CoarseGrainedExecutorBackend = { case (rpcEnv, arguments, env, resourceProfile) =>
       new YarnCoarseGrainedExecutorBackend(rpcEnv, arguments.driverUrl, arguments.executorId,
-        arguments.bindAddress, arguments.hostname, arguments.cores, arguments.userClassPath, env,
-        arguments.resourcesFileOpt, resourceProfile)
+        arguments.bindAddress, arguments.hostname, arguments.cores, arguments.userClassPath.toSeq,
+        env, arguments.resourcesFileOpt, resourceProfile)
     }
     val backendArgs = CoarseGrainedExecutorBackend.parseArguments(args,
       this.getClass.getCanonicalName.stripSuffix("$"))
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkOperation.scala
index 18d2950..bbfc1b8 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkOperation.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkOperation.scala
@@ -37,7 +37,7 @@ private[hive] trait SparkOperation extends Operation with Logging {
 
   protected var statementId = getHandle().getHandleIdentifier().getPublicId().toString()
 
-  protected def cleanup(): Unit = Unit // noop by default
+  protected def cleanup(): Unit = () // noop by default
 
   abstract override def run(): Unit = {
     withLocalProperties {
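
The one non-collection fix: the old body `= Unit` evaluated the `Unit`
companion object and discarded it, which 2.12 accepted silently but 2.13
deprecates (and a fatal-warnings build rejects). The unit value itself is the
literal `()`. A minimal sketch:

    trait CleanupLike {
      // `Unit` on the right-hand side would name the companion object
      // scala.Unit, not the unit value; the literal () is the value.
      protected def cleanup(): Unit = ()   // noop by default
    }
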

