Posted to commits@spark.apache.org by sr...@apache.org on 2022/06/14 14:36:47 UTC

[spark] branch master updated: [SPARK-39298][CORE][SQL][DSTREAM][GRAPHX][ML][MLLIB][SS][YARN] Replace constructing ranges of collection indices manually with `.indices`

This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 5556cfc59aa [SPARK-39298][CORE][SQL][DSTREAM][GRAPHX][ML][MLLIB][SS][YARN] Replace constructing ranges of collection indices manually with `.indices`
5556cfc59aa is described below

commit 5556cfc59aa97a3ad4ea0baacebe19859ec0bcb7
Author: yangjie01 <ya...@baidu.com>
AuthorDate: Tue Jun 14 09:36:30 2022 -0500

    [SPARK-39298][CORE][SQL][DSTREAM][GRAPHX][ML][MLLIB][SS][YARN] Replace constructing ranges of collection indices manually with `.indices`
    
    ### What changes were proposed in this pull request?
    This PR is a trivial change: use `Seq.indices` instead of constructing ranges of collection indices manually.
    
    **Before**
    ```scala
    var x: Seq[Int]
    Range(0, x.size)
    0 until x.size
    0.to(x.size - 1)
    ```
    **After**
    
    ```scala
    var x: Seq[Int]
    x.indices
    ```
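    
    For reference, a minimal self-contained sketch (not part of the patch itself) showing that `.indices` yields the same range as the manual constructions above, for both `Seq` and `Array`; the object name `IndicesSketch` is purely illustrative:
    
    ```scala
    object IndicesSketch {
      def main(args: Array[String]): Unit = {
        val xs = Seq("a", "b", "c")
        // `indices` is defined as `0 until length`, so all of these are equal element-wise.
        assert(xs.indices == Range(0, xs.size))
        assert(xs.indices == (0 until xs.size))
        assert(xs.indices == 0.to(xs.size - 1))
    
        // Arrays (used in several of the touched files) expose the same method via ArrayOps.
        val arr = new Array[Int](3)
        for (i <- arr.indices) arr(i) = i
        assert(arr.toSeq == Seq(0, 1, 2))
      }
    }
    ```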
    
    ### Why are the changes needed?
    Use the existing collection API (`.indices`) instead of hand-written index ranges; the result is shorter and less error-prone.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions.
    
    Closes #36679 from LuciferYang/seq-indices.
    
    Authored-by: yangjie01 <ya...@baidu.com>
    Signed-off-by: Sean Owen <sr...@gmail.com>
---
 .../spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala      |  2 +-
 .../spark/streaming/kafka010/KafkaDataConsumerSuite.scala    |  2 +-
 .../spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala |  2 +-
 core/src/main/scala/org/apache/spark/MapOutputTracker.scala  |  2 +-
 core/src/main/scala/org/apache/spark/SparkContext.scala      |  8 ++++----
 .../main/scala/org/apache/spark/deploy/master/Master.scala   |  2 +-
 .../scala/org/apache/spark/metrics/ExecutorMetricType.scala  |  2 +-
 core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala      |  2 +-
 core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala  |  2 +-
 core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala  |  2 +-
 core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala |  2 +-
 .../scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala |  2 +-
 .../main/scala/org/apache/spark/storage/BlockManager.scala   |  2 +-
 .../scala/org/apache/spark/storage/memory/MemoryStore.scala  |  2 +-
 .../org/apache/spark/BarrierStageOnSubmittedSuite.scala      |  2 +-
 .../org/apache/spark/rdd/ParallelCollectionSplitSuite.scala  |  4 ++--
 .../scala/org/apache/spark/scheduler/AQEShuffledRDD.scala    |  2 +-
 .../spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala |  2 +-
 .../scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala |  2 +-
 .../spark/scheduler/OutputCommitCoordinatorSuite.scala       |  4 ++--
 .../test/scala/org/apache/spark/util/FileAppenderSuite.scala |  4 ++--
 .../test/scala/org/apache/spark/util/JsonProtocolSuite.scala |  2 +-
 .../util/collection/unsafe/sort/PrefixComparatorsSuite.scala |  2 +-
 .../scala/org/apache/spark/examples/MultiBroadcastTest.scala |  4 ++--
 .../main/scala/org/apache/spark/examples/SparkKMeans.scala   |  2 +-
 .../apache/spark/graphx/impl/ShippableVertexPartition.scala  |  2 +-
 .../src/test/scala/org/apache/spark/graphx/EdgeSuite.scala   |  2 +-
 mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala     |  8 ++++----
 .../org/apache/spark/ml/feature/QuantileDiscretizer.scala    |  2 +-
 .../scala/org/apache/spark/ml/feature/StringIndexer.scala    |  4 ++--
 .../scala/org/apache/spark/ml/tuning/CrossValidator.scala    |  4 ++--
 .../org/apache/spark/ml/tuning/TrainValidationSplit.scala    |  4 ++--
 .../org/apache/spark/ml/classification/NaiveBayesSuite.scala |  2 +-
 .../scala/org/apache/spark/ml/feature/BucketizerSuite.scala  |  6 +++---
 .../apache/spark/mllib/classification/NaiveBayesSuite.scala  |  2 +-
 .../org/apache/spark/deploy/yarn/ApplicationMaster.scala     |  2 +-
 .../spark/sql/catalyst/analysis/ResolveDefaultColumns.scala  |  4 ++--
 .../sql/catalyst/expressions/collectionOperations.scala      |  2 +-
 .../org/apache/spark/sql/catalyst/util/ToNumberParser.scala  |  2 +-
 .../spark/sql/catalyst/expressions/PredicateSuite.scala      | 12 ++++++------
 .../spark/sql/connector/catalog/InMemoryPartitionTable.scala |  2 +-
 .../main/scala/org/apache/spark/sql/execution/Columnar.scala |  2 +-
 .../org/apache/spark/sql/execution/ShuffledRowRDD.scala      |  2 +-
 .../apache/spark/sql/streaming/StateStoreMetricsTest.scala   |  2 +-
 .../org/apache/spark/sql/hive/execution/UDAQuerySuite.scala  |  4 ++--
 .../spark/streaming/scheduler/ReceiverSchedulingPolicy.scala |  2 +-
 .../org/apache/spark/streaming/BasicOperationsSuite.scala    |  2 +-
 .../scala/org/apache/spark/streaming/MasterFailureTest.scala |  2 +-
 .../scala/org/apache/spark/streaming/TestSuiteBase.scala     |  2 +-
 49 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/connector/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/connector/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index 2396f31b954..e7a12dedf21 100644
--- a/connector/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/connector/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -2281,7 +2281,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
       val headers = row.getList[Row](row.fieldIndex("headers")).asScala
       assert(headers.length === expected.length)
 
-      (0 until expected.length).foreach { idx =>
+      expected.indices.foreach { idx =>
         val key = headers(idx).getAs[String]("key")
         val value = headers(idx).getAs[Array[Byte]]("value")
         assert(key === expected(idx)._1)
diff --git a/connector/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumerSuite.scala b/connector/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumerSuite.scala
index 9c461e73875..a885c8064a7 100644
--- a/connector/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumerSuite.scala
+++ b/connector/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumerSuite.scala
@@ -133,7 +133,7 @@ class KafkaDataConsumerSuite extends SparkFunSuite with MockitoSugar with Before
       val consumer = KafkaDataConsumer.acquire[Array[Byte], Array[Byte]](
         topicPartition, kafkaParams, taskContext, useCache)
       try {
-        val rcvd = (0 until data.length).map { offset =>
+        val rcvd = data.indices.map { offset =>
           val bytes = consumer.get(offset, 10000).value()
           new String(bytes)
         }
diff --git a/connector/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala b/connector/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
index 12d950096b4..4b3b7454b86 100644
--- a/connector/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
+++ b/connector/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
@@ -96,7 +96,7 @@ abstract class KinesisBackedBlockRDDTests(aggregateTestData: Boolean)
       allRanges.map { range => SequenceNumberRanges(Array(range)) }.toArray
     ).map { bytes => new String(bytes).toInt }.collectPartitions()
     assert(receivedData3.length === allRanges.size)
-    for (i <- 0 until allRanges.size) {
+    for (i <- allRanges.indices) {
       assert(receivedData3(i).toSeq === shardIdToData(allRanges(i).shardId))
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
index 79cae483b22..b9be9307ec4 100644
--- a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
+++ b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
@@ -965,7 +965,7 @@ private[spark] class MapOutputTrackerMaster(
         statuses.length.toLong * totalSizes.length / parallelAggThreshold + 1).toInt
       if (parallelism <= 1) {
         statuses.filter(_ != null).foreach { s =>
-          for (i <- 0 until totalSizes.length) {
+          for (i <- totalSizes.indices) {
             totalSizes(i) += s.getSizeForBlock(i)
           }
         }
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index a106d5bacc5..f771dbf1c01 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -2277,7 +2277,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * a result from one partition)
    */
   def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
-    runJob(rdd, func, 0 until rdd.partitions.length)
+    runJob(rdd, func, rdd.partitions.indices)
   }
 
   /**
@@ -2289,7 +2289,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * a result from one partition)
    */
   def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
-    runJob(rdd, func, 0 until rdd.partitions.length)
+    runJob(rdd, func, rdd.partitions.indices)
   }
 
   /**
@@ -2304,7 +2304,7 @@ class SparkContext(config: SparkConf) extends Logging {
     rdd: RDD[T],
     processPartition: (TaskContext, Iterator[T]) => U,
     resultHandler: (Int, U) => Unit): Unit = {
-    runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
+    runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler)
   }
 
   /**
@@ -2319,7 +2319,7 @@ class SparkContext(config: SparkConf) extends Logging {
       processPartition: Iterator[T] => U,
       resultHandler: (Int, U) => Unit): Unit = {
     val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
-    runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
+    runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler)
   }
 
   /**
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
index 775b27bcbf2..29399406980 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
@@ -742,7 +742,7 @@ private[deploy] class Master(
         val assignedCores = scheduleExecutorsOnWorkers(app, usableWorkers, spreadOutApps)
 
         // Now that we've decided how many cores to allocate on each worker, let's allocate them
-        for (pos <- 0 until usableWorkers.length if assignedCores(pos) > 0) {
+        for (pos <- usableWorkers.indices if assignedCores(pos) > 0) {
           allocateWorkerResourceToExecutors(
             app, assignedCores(pos), app.desc.coresPerExecutor, usableWorkers(pos))
         }
diff --git a/core/src/main/scala/org/apache/spark/metrics/ExecutorMetricType.scala b/core/src/main/scala/org/apache/spark/metrics/ExecutorMetricType.scala
index a536919709c..648532faa3a 100644
--- a/core/src/main/scala/org/apache/spark/metrics/ExecutorMetricType.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/ExecutorMetricType.scala
@@ -208,7 +208,7 @@ private[spark] object ExecutorMetricType {
     var numberOfMetrics = 0
     val definedMetricsAndOffset = mutable.LinkedHashMap.empty[String, Int]
     metricGetters.foreach { m =>
-      (0 until m.names.length).foreach { idx =>
+      m.names.indices.foreach { idx =>
         definedMetricsAndOffset += (m.names(idx) -> (idx + numberOfMetrics))
       }
       numberOfMetrics += m.names.length
diff --git a/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala b/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala
index 05cad3d7897..67411955077 100644
--- a/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala
@@ -36,7 +36,7 @@ class BlockRDD[T: ClassTag](sc: SparkContext, @transient val blockIds: Array[Blo
 
   override def getPartitions: Array[Partition] = {
     assertValid()
-    (0 until blockIds.length).map { i =>
+    blockIds.indices.map { i =>
       new BlockRDDPartition(blockIds(i), i).asInstanceOf[Partition]
     }.toArray
   }
diff --git a/core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala
index 500d306f336..8bac6e73611 100644
--- a/core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala
@@ -110,7 +110,7 @@ class CoGroupedRDD[K: ClassTag](
 
   override def getPartitions: Array[Partition] = {
     val array = new Array[Partition](part.numPartitions)
-    for (i <- 0 until array.length) {
+    for (i <- array.indices) {
       // Each CoGroupPartition will have a dependency per contributing RDD
       array(i) = new CoGroupPartition(i, rdds.zipWithIndex.map { case (rdd, j) =>
         // Assume each RDD contributed a single dependency, and get it
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 596298b222e..119fdae531f 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -156,7 +156,7 @@ class NewHadoopRDD[K, V](
       }
 
       val result = new Array[Partition](rawSplits.size)
-      for (i <- 0 until rawSplits.size) {
+      for (i <- rawSplits.indices) {
         result(i) =
             new NewHadoopPartition(id, i, rawSplits(i).asInstanceOf[InputSplit with Writable])
       }
diff --git a/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
index 1dec977f47a..a9061b3fdc9 100644
--- a/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala
@@ -70,7 +70,7 @@ private[spark] class SubtractedRDD[K: ClassTag, V: ClassTag, W: ClassTag](
 
   override def getPartitions: Array[Partition] = {
     val array = new Array[Partition](part.numPartitions)
-    for (i <- 0 until array.length) {
+    for (i <- array.indices) {
       // Each CoGroupPartition will depend on rdd1 and rdd2
       array(i) = new CoGroupPartition(i, Seq(rdd1, rdd2).zipWithIndex.map { case (rdd, j) =>
         dependencies(j) match {
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 55938d8b77e..d20b534ee63 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -380,7 +380,7 @@ private[spark] class TaskSchedulerImpl(
     var minLaunchedLocality: Option[TaskLocality] = None
     // nodes and executors that are excluded for the entire application have already been
     // filtered out by this point
-    for (i <- 0 until shuffledOffers.size) {
+    for (i <- shuffledOffers.indices) {
       val execId = shuffledOffers(i).executorId
       val host = shuffledOffers(i).host
       val taskSetRpID = taskSet.taskSet.resourceProfileId
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index d5901888d1a..619b5e1edf7 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -2040,7 +2040,7 @@ private[spark] object BlockManager {
     }
 
     val blockManagers = new HashMap[BlockId, Seq[String]]
-    for (i <- 0 until blockIds.length) {
+    for (i <- blockIds.indices) {
       blockManagers(blockIds(i)) = blockLocations(i).map { loc =>
         ExecutorCacheTaskLocation(loc.host, loc.executorId).toString
       }
diff --git a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
index 144d8cff7d4..6f8defaa288 100644
--- a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
+++ b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
@@ -504,7 +504,7 @@ private[spark] class MemoryStore(
         try {
           logInfo(s"${selectedBlocks.size} blocks selected for dropping " +
             s"(${Utils.bytesToString(freedMemory)} bytes)")
-          (0 until selectedBlocks.size).foreach { idx =>
+          selectedBlocks.indices.foreach { idx =>
             val blockId = selectedBlocks(idx)
             val entry = entries.synchronized {
               entries.get(blockId)
diff --git a/core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala b/core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala
index 1ba13c2ef18..13d479e35aa 100644
--- a/core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala
+++ b/core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala
@@ -48,7 +48,7 @@ class BarrierStageOnSubmittedSuite extends SparkFunSuite with LocalSparkContext
     val futureAction = sc.submitJob(
       rdd,
       (iter: Iterator[Int]) => iter.toArray,
-      partitions.getOrElse(0 until rdd.partitions.length),
+      partitions.getOrElse(rdd.partitions.indices),
       { case (_, _) => return }: (Int, Array[Int]) => Unit,
       { return }
     )
diff --git a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
index 879107350bb..2b57f8c8f6f 100644
--- a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
@@ -117,7 +117,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val r = ParallelCollectionRDD.slice(1 to 7, 4)
     val nr = ParallelCollectionRDD.slice(1L to 7L, 4)
     assert(r.size === 4)
-    for (i <- 0 until r.size) {
+    for (i <- r.indices) {
       assert(r(i).size === nr(i).size)
     }
   }
@@ -126,7 +126,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
     val r = ParallelCollectionRDD.slice(List(1, 2), 4)
     val nr = ParallelCollectionRDD.slice(1L to 2L, 4)
     assert(r.size === 4)
-    for (i <- 0 until r.size) {
+    for (i <- r.indices) {
       assert(r(i).size === nr(i).size)
     }
   }
diff --git a/core/src/test/scala/org/apache/spark/scheduler/AQEShuffledRDD.scala b/core/src/test/scala/org/apache/spark/scheduler/AQEShuffledRDD.scala
index ae5e0e9179f..3f8eaede6e7 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/AQEShuffledRDD.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/AQEShuffledRDD.scala
@@ -38,7 +38,7 @@ class CoalescedPartitioner(val parent: Partitioner, val partitionStartIndices: A
   @transient private lazy val parentPartitionMapping: Array[Int] = {
     val n = parent.numPartitions
     val result = new Array[Int](n)
-    for (i <- 0 until partitionStartIndices.length) {
+    for (i <- partitionStartIndices.indices) {
       val start = partitionStartIndices(i)
       val end = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
       for (j <- start until end) {
diff --git a/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala
index 05fecffc647..17c9f2e9dff 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala
@@ -408,7 +408,7 @@ class CoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkCo
     sc.submitJob(
       rdd,
       (iter: Iterator[Int]) => iter.toArray,
-      0 until rdd.partitions.length,
+      rdd.partitions.indices,
       { case (_, _) => return }: (Int, Array[Int]) => Unit,
       { return }
     )
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index e51ced860db..fc7aa06e41e 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -1806,7 +1806,7 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
     // now we should submit stage 1, and the map output from stage 0 should be registered
 
     // check that we have all the map output for stage 0
-    (0 until reduceRdd.partitions.length).foreach { reduceIdx =>
+    reduceRdd.partitions.indices.foreach { reduceIdx =>
       val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
       // really we should have already thrown an exception rather than fail either of these
       // asserts, but just to be extra defensive let's double check the statuses are OK
diff --git a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
index 728b9d65054..360f7e1b4e4 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
@@ -139,14 +139,14 @@ class OutputCommitCoordinatorSuite extends SparkFunSuite with BeforeAndAfter {
   test("Only one of two duplicate commit tasks should commit") {
     val rdd = sc.parallelize(Seq(1), 1)
     sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully _,
-      0 until rdd.partitions.size)
+      rdd.partitions.indices)
     assert(tempDir.list().size === 1)
   }
 
   test("If commit fails, if task is retried it should not be locked, and will succeed.") {
     val rdd = sc.parallelize(Seq(1), 1)
     sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).failFirstCommitAttempt _,
-      0 until rdd.partitions.size)
+      rdd.partitions.indices)
     assert(tempDir.list().size === 1)
   }
 
diff --git a/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala b/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
index 8ca4bc9a152..e79e3256fe1 100644
--- a/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala
@@ -178,7 +178,7 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
     // send data to appender through the input stream, and wait for the data to be written
     val allGeneratedFiles = new HashSet[String]()
     val items = (1 to 10).map { _.toString * 10000 }
-    for (i <- 0 until items.size) {
+    for (i <- items.indices) {
       testOutputStream.write(items(i).getBytes(StandardCharsets.UTF_8))
       testOutputStream.flush()
       allGeneratedFiles ++= RollingFileAppender.getSortedRolledOverFiles(
@@ -364,7 +364,7 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
     ): Seq[File] = {
     // send data to appender through the input stream, and wait for the data to be written
     val expectedText = textToAppend.mkString("")
-    for (i <- 0 until textToAppend.size) {
+    for (i <- textToAppend.indices) {
       outputStream.write(textToAppend(i).getBytes(StandardCharsets.UTF_8))
       outputStream.flush()
       Thread.sleep(sleepTimeBetweenTexts)
diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
index 572ac6fc8bb..ea6267698c8 100644
--- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
@@ -771,7 +771,7 @@ private[spark] object JsonProtocolSuite extends Assertions {
     assert(info1.submissionTime === info2.submissionTime)
     assert(info1.completionTime === info2.completionTime)
     assert(info1.rddInfos.size === info2.rddInfos.size)
-    (0 until info1.rddInfos.size).foreach { i =>
+    info1.rddInfos.indices.foreach { i =>
       assertEquals(info1.rddInfos(i), info2.rddInfos(i))
     }
     assert(info1.accumulables === info2.accumulables)
diff --git a/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala
index a55004f664a..9660ec90736 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala
@@ -62,7 +62,7 @@ class PrefixComparatorsSuite extends SparkFunSuite with ScalaCheckPropertyChecks
   test("Binary prefix comparator") {
 
      def compareBinary(x: Array[Byte], y: Array[Byte]): Int = {
-      for (i <- 0 until x.length; if i < y.length) {
+      for (i <- x.indices; if i < y.length) {
         val v1 = x(i) & 0xff
         val v2 = y(i) & 0xff
         val res = v1 - v2
diff --git a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
index 4bea5cae775..0692c5164b9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
@@ -37,12 +37,12 @@ object MultiBroadcastTest {
     val num = if (args.length > 1) args(1).toInt else 1000000
 
     val arr1 = new Array[Int](num)
-    for (i <- 0 until arr1.length) {
+    for (i <- arr1.indices) {
       arr1(i) = i
     }
 
     val arr2 = new Array[Int](num)
-    for (i <- 0 until arr2.length) {
+    for (i <- arr2.indices) {
       arr2(i) = i
     }
 
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index cf03e0203f7..e0ab07acf7e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -38,7 +38,7 @@ object SparkKMeans {
     var bestIndex = 0
     var closest = Double.PositiveInfinity
 
-    for (i <- 0 until centers.length) {
+    for (i <- centers.indices) {
       val tempDist = squaredDistance(p, centers(i))
       if (tempDist < closest) {
         closest = tempDist
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
index 184b96426fa..2bb673c48a4 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
@@ -28,7 +28,7 @@ private[graphx]
 class VertexAttributeBlock[VD: ClassTag](val vids: Array[VertexId], val attrs: Array[VD])
   extends Serializable {
   def iterator: Iterator[(VertexId, VD)] =
-    (0 until vids.length).iterator.map { i => (vids(i), attrs(i)) }
+    vids.indices.iterator.map { i => (vids(i), attrs(i)) }
 }
 
 private[graphx]
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
index 4d6b899c83a..6f38dbe84ef 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
@@ -32,7 +32,7 @@ class EdgeSuite extends SparkFunSuite {
     // to ascending order
     val sortedEdges = testEdges.sorted(Edge.lexicographicOrdering[Int])
 
-    for (i <- 0 until testEdges.length) {
+    for (i <- testEdges.indices) {
       assert(sortedEdges(i) == testEdges(testEdges.length - i - 1))
     }
   }
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
index 253d4083de7..c31a99dd4fd 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -480,7 +480,7 @@ private[ml] class FeedForwardModel private(
   val layers = topology.layers
   val layerModels = new Array[LayerModel](layers.length)
   private var offset = 0
-  for (i <- 0 until layers.length) {
+  for (i <- layers.indices) {
     layerModels(i) = layers(i).createModel(
       new BDV[Double](weights.toArray, offset, 1, layers(i).weightSize))
     offset += layers(i).weightSize
@@ -495,7 +495,7 @@ private[ml] class FeedForwardModel private(
     if (outputs == null || outputs(0).cols != currentBatchSize) {
       outputs = new Array[BDM[Double]](layers.length)
       var inputSize = data.rows
-      for (i <- 0 until layers.length) {
+      for (i <- layers.indices) {
         if (layers(i).inPlace) {
           outputs(i) = outputs(i - 1)
         } else {
@@ -542,7 +542,7 @@ private[ml] class FeedForwardModel private(
     }
     val cumGradientArray = cumGradient.toArray
     var offset = 0
-    for (i <- 0 until layerModels.length) {
+    for (i <- layerModels.indices) {
       val input = if (i == 0) data else outputs(i - 1)
       layerModels(i).grad(deltas(i), input,
         new BDV[Double](cumGradientArray, offset, 1, layers(i).weightSize))
@@ -601,7 +601,7 @@ private[ann] object FeedForwardModel {
     val weights = BDV.zeros[Double](topology.layers.map(_.weightSize).sum)
     var offset = 0
     val random = new XORShiftRandom(seed)
-    for (i <- 0 until layers.length) {
+    for (i <- layers.indices) {
       layerModels(i) = layers(i).
         initModel(new BDV[Double](weights.data, offset, 1, layers(i).weightSize), random)
       offset += layers(i).weightSize
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala
index b93c9b1fcd2..4d60172e800 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala
@@ -243,7 +243,7 @@ final class QuantileDiscretizer @Since("1.6.0") (@Since("1.6.0") override val ui
     // non-deterministic results when array contains both 0.0 and -0.0
     // So that here we should first normalize all 0.0 and -0.0 to be 0.0
     // See https://github.com/scala/bug/issues/11995
-    for (i <- 0 until splits.length) {
+    for (i <- splits.indices) {
       if (splits(i) == -0.0) {
         splits(i) = 0.0
       }
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
index 0ca88b8e61e..98a42371d29 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
@@ -367,7 +367,7 @@ class StringIndexerModel (
   // This filters out any null values and also the input labels which are not in
   // the dataset used for fitting.
   private def filterInvalidData(dataset: Dataset[_], inputColNames: Seq[String]): Dataset[_] = {
-    val conditions: Seq[Column] = (0 until inputColNames.length).map { i =>
+    val conditions: Seq[Column] = inputColNames.indices.map { i =>
       val inputColName = inputColNames(i)
       val labelToIndex = labelsToIndexArray(i)
       // We have this additional lookup at `labelToIndex` when `handleInvalid` is set to
@@ -423,7 +423,7 @@ class StringIndexerModel (
       dataset
     }
 
-    for (i <- 0 until outputColNames.length) {
+    for (i <- outputColNames.indices) {
       val inputColName = inputColNames(i)
       val outputColName = outputColNames(i)
       val labelToIndex = labelsToIndexArray(i)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
index 275d3c5510f..e04a8c1389b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
@@ -410,7 +410,7 @@ object CrossValidatorModel extends MLReadable[CrossValidatorModel] {
         val subModelsPath = new Path(path, "subModels")
         for (splitIndex <- 0 until instance.getNumFolds) {
           val splitPath = new Path(subModelsPath, s"fold${splitIndex.toString}")
-          for (paramIndex <- 0 until instance.getEstimatorParamMaps.length) {
+          for (paramIndex <- instance.getEstimatorParamMaps.indices) {
             val modelPath = new Path(splitPath, paramIndex.toString).toString
             instance.subModels(splitIndex)(paramIndex).asInstanceOf[MLWritable].save(modelPath)
           }
@@ -442,7 +442,7 @@ object CrossValidatorModel extends MLReadable[CrossValidatorModel] {
           Array.ofDim[Model[_]](estimatorParamMaps.length))
         for (splitIndex <- 0 until numFolds) {
           val splitPath = new Path(subModelsPath, s"fold${splitIndex.toString}")
-          for (paramIndex <- 0 until estimatorParamMaps.length) {
+          for (paramIndex <- estimatorParamMaps.indices) {
             val modelPath = new Path(splitPath, paramIndex.toString).toString
             _subModels(splitIndex)(paramIndex) =
               DefaultParamsReader.loadParamsInstance(modelPath, sc)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tuning/TrainValidationSplit.scala b/mllib/src/main/scala/org/apache/spark/ml/tuning/TrainValidationSplit.scala
index 488bff1409f..4a6d5164aa0 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tuning/TrainValidationSplit.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tuning/TrainValidationSplit.scala
@@ -372,7 +372,7 @@ object TrainValidationSplitModel extends MLReadable[TrainValidationSplitModel] {
           "persistSubModels to true if the tuning was done with collectSubModels set to true. " +
           "To save the sub-models, try rerunning fitting with collectSubModels set to true.")
         val subModelsPath = new Path(path, "subModels")
-        for (paramIndex <- 0 until instance.getEstimatorParamMaps.length) {
+        for (paramIndex <- instance.getEstimatorParamMaps.indices) {
           val modelPath = new Path(subModelsPath, paramIndex.toString).toString
           instance.subModels(paramIndex).asInstanceOf[MLWritable].save(modelPath)
         }
@@ -399,7 +399,7 @@ object TrainValidationSplitModel extends MLReadable[TrainValidationSplitModel] {
       val subModels: Option[Array[Model[_]]] = if (persistSubModels) {
         val subModelsPath = new Path(path, "subModels")
         val _subModels = Array.ofDim[Model[_]](estimatorParamMaps.length)
-        for (paramIndex <- 0 until estimatorParamMaps.length) {
+        for (paramIndex <- estimatorParamMaps.indices) {
           val modelPath = new Path(subModelsPath, paramIndex.toString).toString
           _subModels(paramIndex) =
             DefaultParamsReader.loadParamsInstance(modelPath, sc)
diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
index aff179a6472..bed45fc68f4 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
@@ -585,7 +585,7 @@ object NaiveBayesSuite {
 
   private def calcLabel(p: Double, pi: Array[Double]): Int = {
     var sum = 0.0
-    for (j <- 0 until pi.length) {
+    for (j <- pi.indices) {
       sum += pi(j)
       if (p < sum) return j
     }
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala
index 9ea15e19185..97d95c7cd73 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala
@@ -208,7 +208,7 @@ class BucketizerSuite extends MLTest with DefaultReadWriteTest {
     val expectedBuckets1 = Array(0.0, 0.0, 1.0, 1.0)
     val expectedBuckets2 = Array(1.0, 1.0, 0.0, 0.0)
 
-    val data = (0 until validData1.length).map { idx =>
+    val data = validData1.indices.map { idx =>
       (validData1(idx), validData2(idx), expectedBuckets1(idx), expectedBuckets2(idx))
     }
     val dataFrame: DataFrame = data.toDF("feature1", "feature2", "expected1", "expected2")
@@ -256,7 +256,7 @@ class BucketizerSuite extends MLTest with DefaultReadWriteTest {
     val expectedBuckets1 = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0)
     val expectedBuckets2 = Array(1.0, 0.0, 1.0, 1.0, 1.0, 2.0, 3.0)
 
-    val data = (0 until validData1.length).map { idx =>
+    val data = validData1.indices.map { idx =>
       (validData1(idx), validData2(idx), expectedBuckets1(idx), expectedBuckets2(idx))
     }
     val dataFrame: DataFrame = data.toDF("feature1", "feature2", "expected1", "expected2")
@@ -281,7 +281,7 @@ class BucketizerSuite extends MLTest with DefaultReadWriteTest {
     val expectedBuckets1 = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0)
     val expectedBuckets2 = Array(2.0, 1.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0)
 
-    val data = (0 until validData1.length).map { idx =>
+    val data = validData1.indices.map { idx =>
       (validData1(idx), validData2(idx), expectedBuckets1(idx), expectedBuckets2(idx))
     }
     val dataFrame: DataFrame = data.toDF("feature1", "feature2", "expected1", "expected2")
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
index b9d83dd2b81..41c8feb8a6b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
@@ -36,7 +36,7 @@ object NaiveBayesSuite {
 
   private def calcLabel(p: Double, pi: Array[Double]): Int = {
     var sum = 0.0
-    for (j <- 0 until pi.length) {
+    for (j <- pi.indices) {
       sum += pi(j)
       if (p < sum) return j
     }
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index b15623ceff5..31c25688480 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -187,7 +187,7 @@ private[spark] class ApplicationMaster(
     val visibilities = distCacheConf.get(CACHED_FILES_VISIBILITIES)
     val resTypes = distCacheConf.get(CACHED_FILES_TYPES)
 
-    for (i <- 0 to distFiles.size - 1) {
+    for (i <- distFiles.indices) {
       val resType = LocalResourceType.valueOf(resTypes(i))
       setupDistributedCache(distFiles(i), resType, timeStamps(i).toString, fileSizes(i).toString,
       visibilities(i))
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveDefaultColumns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveDefaultColumns.scala
index 6fbadd2b3a1..e47a1230a7e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveDefaultColumns.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveDefaultColumns.scala
@@ -358,7 +358,7 @@ case class ResolveDefaultColumns(
     val updated: Seq[Seq[Expression]] = {
       table.rows.map { row: Seq[Expression] =>
         for {
-          i <- 0 until row.size
+          i <- row.indices
           expr = row(i)
           defaultExpr = if (i < defaultExpressions.size) defaultExpressions(i) else Literal(null)
         } yield replaceExplicitDefaultReferenceInExpression(
@@ -384,7 +384,7 @@ case class ResolveDefaultColumns(
     var replaced = false
     val updated: Seq[NamedExpression] = {
       for {
-        i <- 0 until project.projectList.size
+        i <- project.projectList.indices
         projectExpr = project.projectList(i)
         defaultExpr = if (i < defaultExpressions.size) defaultExpressions(i) else Literal(null)
       } yield replaceExplicitDefaultReferenceInExpression(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
index 9254cbefeb2..eac2c915d91 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
@@ -3623,7 +3623,7 @@ case class ArrayDistinct(child: Expression)
       val array = data.toArray[AnyRef](elementType)
       val arrayBuffer = new scala.collection.mutable.ArrayBuffer[AnyRef]
       var alreadyStoredNull = false
-      for (i <- 0 until array.length) {
+      for (i <- array.indices) {
         if (array(i) != null) {
           var found = false
           var j = 0
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/ToNumberParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/ToNumberParser.scala
index 430eb3daa4f..c54eeb1bf63 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/ToNumberParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/ToNumberParser.scala
@@ -532,7 +532,7 @@ class ToNumberParser(numberFormat: String, errorOnFail: Boolean) extends Seriali
       // The input contains more thousands separators than the format string.
       return None
     }
-    for (i <- 0 until expectedDigits.length) {
+    for (i <- expectedDigits.indices) {
       val expectedToken: Digits = expectedDigits(i)
       val actualNumDigits: Int =
         if (i < parsedDigitGroupSizes.length) {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
index c34b37d7c6c..d75f443f5a4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
@@ -392,7 +392,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: lessThan") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(LessThan(smallValues(i), largeValues(i)), true)
       checkEvaluation(LessThan(equalValues1(i), equalValues2(i)), false)
       checkEvaluation(LessThan(largeValues(i), smallValues(i)), false)
@@ -400,7 +400,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: LessThanOrEqual") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(LessThanOrEqual(smallValues(i), largeValues(i)), true)
       checkEvaluation(LessThanOrEqual(equalValues1(i), equalValues2(i)), true)
       checkEvaluation(LessThanOrEqual(largeValues(i), smallValues(i)), false)
@@ -408,7 +408,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: GreaterThan") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(GreaterThan(smallValues(i), largeValues(i)), false)
       checkEvaluation(GreaterThan(equalValues1(i), equalValues2(i)), false)
       checkEvaluation(GreaterThan(largeValues(i), smallValues(i)), true)
@@ -416,7 +416,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: GreaterThanOrEqual") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(GreaterThanOrEqual(smallValues(i), largeValues(i)), false)
       checkEvaluation(GreaterThanOrEqual(equalValues1(i), equalValues2(i)), true)
       checkEvaluation(GreaterThanOrEqual(largeValues(i), smallValues(i)), true)
@@ -424,7 +424,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: EqualTo") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(EqualTo(smallValues(i), largeValues(i)), false)
       checkEvaluation(EqualTo(equalValues1(i), equalValues2(i)), true)
       checkEvaluation(EqualTo(largeValues(i), smallValues(i)), false)
@@ -432,7 +432,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
   }
 
   test("BinaryComparison: EqualNullSafe") {
-    for (i <- 0 until smallValues.length) {
+    for (i <- smallValues.indices) {
       checkEvaluation(EqualNullSafe(smallValues(i), largeValues(i)), false)
       checkEvaluation(EqualNullSafe(equalValues1(i), equalValues2(i)), true)
       checkEvaluation(EqualNullSafe(largeValues(i), smallValues(i)), false)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryPartitionTable.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryPartitionTable.scala
index 671d22040e1..660140e282e 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryPartitionTable.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryPartitionTable.scala
@@ -102,7 +102,7 @@ class InMemoryPartitionTable(
     val dataTypes = names.map(schema(_).dataType)
     val currentRow = new GenericInternalRow(new Array[Any](names.length))
     memoryTablePartitions.keySet().asScala.filter { key =>
-      for (i <- 0 until names.length) {
+      for (i <- names.indices) {
         currentRow.values(i) = key.get(indexes(i), dataTypes(i))
       }
       currentRow == ident
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
index 1971b8b1baf..c3118d7e78c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
@@ -363,7 +363,7 @@ private object RowToColumnConverter {
     override def append(row: SpecializedGetters, column: Int, cv: WritableColumnVector): Unit = {
       cv.appendStruct(false)
       val data = row.getStruct(column, childConverters.length)
-      for (i <- 0 until childConverters.length) {
+      for (i <- childConverters.indices) {
         childConverters(i).append(data, i, cv.getChild(i))
       }
     }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
index 7e7100338dd..367732dbb20 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
@@ -85,7 +85,7 @@ class CoalescedPartitioner(val parent: Partitioner, val partitionStartIndices: A
   @transient private lazy val parentPartitionMapping: Array[Int] = {
     val n = parent.numPartitions
     val result = new Array[Int](n)
-    for (i <- 0 until partitionStartIndices.length) {
+    for (i <- partitionStartIndices.indices) {
       val start = partitionStartIndices(i)
       val end = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
       for (j <- start until end) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala
index 0abc79a5515..57ced748cd9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StateStoreMetricsTest.scala
@@ -131,7 +131,7 @@ trait StateStoreMetricsTest extends StreamTest {
   def assertNumStateRows(total: Seq[Long], updated: Seq[Long]): AssertOnQuery = {
     assert(total.length === updated.length)
     assertNumStateRows(
-      total, updated, droppedByWatermark = (0 until total.length).map(_ => 0L), None)
+      total, updated, droppedByWatermark = total.indices.map(_ => 0L), None)
   }
 
   def assertNumStateRows(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala
index 8243b9d185d..269799c123f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala
@@ -116,14 +116,14 @@ object ArrayDataAgg extends Aggregator[Array[Double], Array[Double], Array[Doubl
   def zero: Array[Double] = Array(0.0, 0.0, 0.0)
   def reduce(s: Array[Double], array: Array[Double]): Array[Double] = {
     require(s.length == array.length)
-    for ( j <- 0 until s.length ) {
+    for ( j <- s.indices) {
       s(j) += array(j)
     }
     s
   }
   def merge(s1: Array[Double], s2: Array[Double]): Array[Double] = {
     require(s1.length == s2.length)
-    for ( j <- 0 until s1.length ) {
+    for ( j <- s1.indices) {
       s1(j) += s2(j)
     }
     s1
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverSchedulingPolicy.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverSchedulingPolicy.scala
index 0e4a64f6c0e..ce89d623467 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverSchedulingPolicy.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverSchedulingPolicy.scala
@@ -92,7 +92,7 @@ private[streaming] class ReceiverSchedulingPolicy {
 
     // Firstly, we need to respect "preferredLocation". So if a receiver has "preferredLocation",
     // we need to make sure the "preferredLocation" is in the candidate scheduled executor list.
-    for (i <- 0 until receivers.length) {
+    for (i <- receivers.indices) {
       // Note: preferredLocation is host but executors are host_executorId
       receivers(i).preferredLocation.foreach { host =>
         hostToExecutors.get(host) match {
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/BasicOperationsSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/BasicOperationsSuite.scala
index 648fa668988..4429cde0c1f 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/BasicOperationsSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/BasicOperationsSuite.scala
@@ -742,7 +742,7 @@ class BasicOperationsSuite extends TestSuiteBase {
         }
 
         Thread.sleep(200)
-        for (i <- 0 until input.size) {
+        for (i <- input.indices) {
           testServer.send(input(i).toString + "\n")
           Thread.sleep(200)
           val numCompletedBatches = batchCounter.getNumCompletedBatches
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala b/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
index 4fc9d13cdda..4b26212ec30 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
@@ -369,7 +369,7 @@ class FileGeneratingThread(input: Seq[String], testDir: Path, interval: Long)
     val maxTries = 3
     try {
       Thread.sleep(5000) // To make sure that all the streaming context has been set up
-      for (i <- 0 until input.size) {
+      for (i <- input.indices) {
         // Write the data to a local file and then move it to the target test directory
         val localFile = new File(localTestDir, (i + 1).toString)
         val hadoopFile = new Path(testDir, (i + 1).toString)
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
index 7ce4343acbd..5836478937d 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
@@ -468,7 +468,7 @@ trait TestSuiteBase extends SparkFunSuite with BeforeAndAfterEach with Logging {
     logInfo("--------------------------------")
 
     // Match the output with the expected output
-    for (i <- 0 until output.size) {
+    for (i <- output.indices) {
       if (useSet) {
         assert(
           output(i).toSet === expectedOutput(i).toSet,


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org