Posted to commits@spark.apache.org by an...@apache.org on 2016/03/11 00:57:32 UTC

spark git commit: [SPARK-3854][BUILD] Scala style: require spaces before `{`.

Repository: spark
Updated Branches:
  refs/heads/master 81d48532d -> 91fed8e9c


[SPARK-3854][BUILD] Scala style: require spaces before `{`.

## What changes were proposed in this pull request?

Since the opening curly brace, '{', has many usages as discussed in [SPARK-3854](https://issues.apache.org/jira/browse/SPARK-3854), this PR adds a ScalaStyle rule that forbids the '){' pattern in the common case shown below and fixes the existing code accordingly. Enforcing this in ScalaStyle from now on will improve Scala code quality and reduce review time.
```
// Correct:
if (true) {
  println("Wow!")
}

// Incorrect:
if (true){
   println("Wow!")
}
```
IntelliJ also reports warnings for this pattern.
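
For illustration only (this snippet is not part of the patch, and its identifiers are made up): a minimal, self-contained Scala sketch, using only the standard library, of the forms the new `\)\{` regex check rejects and accepts beyond simple `if`/`while` conditions, such as closure arguments and anonymous class bodies.
```
// Rejected by the new check: ')' immediately followed by '{'.
// val xs = Array.fill(3){scala.util.Random.nextInt(10)}
// val r: Runnable = new Runnable(){ override def run(): Unit = println("ok") }

// Accepted: a single space between ')' and '{'.
val xs = Array.fill(3) { scala.util.Random.nextInt(10) }
val r: Runnable = new Runnable() { override def run(): Unit = println("ok") }
```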

## How was this patch tested?

Passes the Jenkins ScalaStyle test.

Author: Dongjoon Hyun <do...@apache.org>

Closes #11637 from dongjoon-hyun/SPARK-3854.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/91fed8e9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/91fed8e9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/91fed8e9

Branch: refs/heads/master
Commit: 91fed8e9c57764eca9463d129ecd68196db7f566
Parents: 81d4853
Author: Dongjoon Hyun <do...@apache.org>
Authored: Thu Mar 10 15:57:22 2016 -0800
Committer: Andrew Or <an...@databricks.com>
Committed: Thu Mar 10 15:57:22 2016 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/rdd/RDD.scala           | 2 +-
 core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala    | 2 +-
 .../scala/org/apache/spark/scheduler/InputFormatInfo.scala   | 2 +-
 .../scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala | 2 +-
 .../org/apache/spark/status/api/v1/OneStageResource.scala    | 2 +-
 .../main/scala/org/apache/spark/storage/BlockManagerId.scala | 2 +-
 core/src/test/scala/org/apache/spark/AccumulatorSuite.scala  | 2 +-
 .../test/scala/org/apache/spark/ImplicitOrderingSuite.scala  | 2 +-
 .../apache/spark/deploy/history/ApplicationCacheSuite.scala  | 2 +-
 .../main/scala/org/apache/spark/examples/LocalFileLR.scala   | 2 +-
 .../main/scala/org/apache/spark/examples/LocalKMeans.scala   | 2 +-
 .../src/main/scala/org/apache/spark/examples/LocalLR.scala   | 4 ++--
 .../main/scala/org/apache/spark/examples/SparkHdfsLR.scala   | 2 +-
 .../src/main/scala/org/apache/spark/examples/SparkLR.scala   | 4 ++--
 .../org/apache/spark/examples/streaming/ActorWordCount.scala | 2 +-
 mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala     | 8 ++++----
 .../org/apache/spark/ml/classification/NaiveBayes.scala      | 2 +-
 .../org/apache/spark/mllib/api/python/PythonMLLibAPI.scala   | 2 +-
 .../org/apache/spark/mllib/classification/NaiveBayes.scala   | 2 +-
 .../apache/spark/mllib/evaluation/MultilabelMetrics.scala    | 4 ++--
 .../src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala  | 2 +-
 .../apache/spark/mllib/regression/IsotonicRegression.scala   | 2 +-
 .../scala/org/apache/spark/mllib/util/NumericParser.scala    | 2 +-
 .../ml/classification/RandomForestClassifierSuite.scala      | 2 +-
 .../apache/spark/mllib/clustering/GaussianMixtureSuite.scala | 2 +-
 .../org/apache/spark/mllib/tree/RandomForestSuite.scala      | 2 +-
 .../src/main/scala/org/apache/spark/repl/SparkILoop.scala    | 2 +-
 scalastyle-config.xml                                        | 8 ++++++++
 .../spark/sql/catalyst/expressions/complexTypeCreator.scala  | 2 +-
 .../spark/sql/catalyst/expressions/mathExpressions.scala     | 8 ++++----
 .../src/test/scala/org/apache/spark/sql/RowTest.scala        | 2 +-
 .../spark/sql/catalyst/optimizer/FilterPushdownSuite.scala   | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala   | 2 +-
 .../spark/sql/execution/datasources/CompressionCodecs.scala  | 2 +-
 .../org/apache/spark/sql/execution/ui/ExecutionPage.scala    | 2 +-
 .../sql/execution/datasources/parquet/ParquetIOSuite.scala   | 2 +-
 .../org/apache/spark/sql/hive/client/FiltersSuite.scala      | 2 +-
 .../org/apache/spark/sql/hive/execution/HiveUDFSuite.scala   | 2 +-
 .../main/scala/org/apache/spark/streaming/Checkpoint.scala   | 2 +-
 .../scala/org/apache/spark/streaming/ui/StreamingPage.scala  | 2 +-
 .../apache/spark/streaming/receiver/RateLimiterSuite.scala   | 6 +++---
 42 files changed, 60 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/rdd/RDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 8bf4489..8782fcd 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -722,7 +722,7 @@ abstract class RDD[T: ClassTag](
    *                        An example of pipe the RDD data of groupBy() in a streaming way,
    *                        instead of constructing a huge String to concat all the elements:
    *                        def printRDDElement(record:(String, Seq[String]), f:String=&gt;Unit) =
-   *                          for (e &lt;- record._2){f(e)}
+   *                          for (e &lt;- record._2) {f(e)}
    * @param separateWorkingDir Use separate working directories for each task.
    * @return the result RDD
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala b/core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala
index 8b4ebf3..2950df6 100644
--- a/core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/RpcTimeout.scala
@@ -119,7 +119,7 @@ private[spark] object RpcTimeout {
     // Find the first set property or use the default value with the first property
     val itr = timeoutPropList.iterator
     var foundProp: Option[(String, String)] = None
-    while (itr.hasNext && foundProp.isEmpty){
+    while (itr.hasNext && foundProp.isEmpty) {
       val propKey = itr.next()
       conf.getOption(propKey).foreach { prop => foundProp = Some(propKey, prop) }
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/scheduler/InputFormatInfo.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/InputFormatInfo.scala b/core/src/main/scala/org/apache/spark/scheduler/InputFormatInfo.scala
index 8235b10..def0aac 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/InputFormatInfo.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/InputFormatInfo.scala
@@ -173,7 +173,7 @@ object InputFormatInfo {
     for (inputSplit <- formats) {
       val splits = inputSplit.findPreferredLocations()
 
-      for (split <- splits){
+      for (split <- splits) {
         val location = split.hostLocation
         val set = nodeToSplit.getOrElseUpdate(location, new HashSet[SplitInfo])
         set += split

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 8b2f497..36df032 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -623,7 +623,7 @@ private[spark] object TaskSchedulerImpl {
         val containerList: ArrayBuffer[T] = map.getOrElse(key, null)
         assert(containerList != null)
         // Get the index'th entry for this host - if present
-        if (index < containerList.size){
+        if (index < containerList.size) {
           retval += containerList.apply(index)
           found = true
         }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
index f9812f0..3e6d294 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
@@ -33,7 +33,7 @@ private[v1] class OneStageResource(ui: SparkUI) {
   @GET
   @Path("")
   def stageData(@PathParam("stageId") stageId: Int): Seq[StageData] = {
-    withStage(stageId){ stageAttempts =>
+    withStage(stageId) { stageAttempts =>
       stageAttempts.map { stage =>
         AllStagesResource.stageUiToStageData(stage.status, stage.info, stage.ui,
           includeDetails = true)

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala
index 69ac375..cae7c9e 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala
@@ -44,7 +44,7 @@ class BlockManagerId private (
 
   def executorId: String = executorId_
 
-  if (null != host_){
+  if (null != host_) {
     Utils.checkHost(host_, "Expected hostname")
     assert (port_ > 0)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 4ff8ae5..61ab240 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -57,7 +57,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex
       }
     }
 
-  test ("basic accumulation"){
+  test ("basic accumulation") {
     sc = new SparkContext("local", "test")
     val acc : Accumulator[Int] = sc.accumulator(0)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
index 4399f25..939f12f 100644
--- a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
@@ -21,7 +21,7 @@ import org.apache.spark.rdd.RDD
 
 class ImplicitOrderingSuite extends SparkFunSuite with LocalSparkContext {
   // Tests that PairRDDFunctions grabs an implicit Ordering in various cases where it should.
-  test("basic inference of Orderings"){
+  test("basic inference of Orderings") {
     sc = new SparkContext("local", "test")
     val rdd = sc.parallelize(1 to 10)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala
index de6680c..e241887 100644
--- a/core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/history/ApplicationCacheSuite.scala
@@ -476,7 +476,7 @@ class ApplicationCacheSuite extends SparkFunSuite with Logging with MockitoSugar
     when(request.getRequestURI()).thenReturn("http://localhost:18080/history/local-123/jobs/job/")
     when(request.getQueryString()).thenReturn("id=2")
     val resp = mock[HttpServletResponse]
-    when(resp.encodeRedirectURL(any())).thenAnswer(new Answer[String](){
+    when(resp.encodeRedirectURL(any())).thenAnswer(new Answer[String]() {
       override def answer(invocationOnMock: InvocationOnMock): String = {
         invocationOnMock.getArguments()(0).asInstanceOf[String]
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
index f2e4c96..bec89f7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -58,7 +58,7 @@ object LocalFileLR {
     val ITERATIONS = args(1).toInt
 
     // Initialize w to a random value
-    var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
+    var w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
     println("Initial w: " + w)
 
     for (i <- 1 to ITERATIONS) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
index 19bebff..f896184 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -41,7 +41,7 @@ object LocalKMeans {
 
   def generateData: Array[DenseVector[Double]] = {
     def generatePoint(i: Int): DenseVector[Double] = {
-      DenseVector.fill(D){rand.nextDouble * R}
+      DenseVector.fill(D) {rand.nextDouble * R}
     }
     Array.tabulate(N)(generatePoint)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
index 58adbab..0baf6db 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
@@ -41,7 +41,7 @@ object LocalLR {
   def generateData: Array[DataPoint] = {
     def generatePoint(i: Int): DataPoint = {
       val y = if (i % 2 == 0) -1 else 1
-      val x = DenseVector.fill(D){rand.nextGaussian + y * R}
+      val x = DenseVector.fill(D) {rand.nextGaussian + y * R}
       DataPoint(x, y)
     }
     Array.tabulate(N)(generatePoint)
@@ -62,7 +62,7 @@ object LocalLR {
 
     val data = generateData
     // Initialize w to a random value
-    var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
+    var w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
     println("Initial w: " + w)
 
     for (i <- 1 to ITERATIONS) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index f7eb9e9..7463b86 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -78,7 +78,7 @@ object SparkHdfsLR {
     val ITERATIONS = args(1).toInt
 
     // Initialize w to a random value
-    var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
+    var w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
     println("Initial w: " + w)
 
     for (i <- 1 to ITERATIONS) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
index 036e3d2..acd8656 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -46,7 +46,7 @@ object SparkLR {
   def generateData: Array[DataPoint] = {
     def generatePoint(i: Int): DataPoint = {
       val y = if (i % 2 == 0) -1 else 1
-      val x = DenseVector.fill(D){rand.nextGaussian + y * R}
+      val x = DenseVector.fill(D) {rand.nextGaussian + y * R}
       DataPoint(x, y)
     }
     Array.tabulate(N)(generatePoint)
@@ -71,7 +71,7 @@ object SparkLR {
     val points = sc.parallelize(generateData, numSlices).cache()
 
     // Initialize w to a random value
-    var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
+    var w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
     println("Initial w: " + w)
 
     for (i <- 1 to ITERATIONS) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
index 2770b8a..844772a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
@@ -100,7 +100,7 @@ class SampleActorReceiver[T](urlOfPublisher: String) extends ActorReceiver {
 object FeederActor {
 
   def main(args: Array[String]) {
-    if (args.length < 2){
+    if (args.length < 2) {
       System.err.println("Usage: FeederActor <hostname> <port>\n")
       System.exit(1)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
index d02806a..f21b623 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -213,8 +213,8 @@ private[ann] object AffineLayerModel {
    */
   def randomWeights(numIn: Int, numOut: Int, seed: Long = 11L): (BDM[Double], BDV[Double]) = {
     val rand: XORShiftRandom = new XORShiftRandom(seed)
-    val weights = BDM.fill[Double](numOut, numIn){ (rand.nextDouble * 4.8 - 2.4) / numIn }
-    val bias = BDV.fill[Double](numOut){ (rand.nextDouble * 4.8 - 2.4) / numIn }
+    val weights = BDM.fill[Double](numOut, numIn) { (rand.nextDouble * 4.8 - 2.4) / numIn }
+    val bias = BDV.fill[Double](numOut) { (rand.nextDouble * 4.8 - 2.4) / numIn }
     (weights, bias)
   }
 }
@@ -529,7 +529,7 @@ private[ml] object FeedForwardTopology {
    */
   def multiLayerPerceptron(layerSizes: Array[Int], softmax: Boolean = true): FeedForwardTopology = {
     val layers = new Array[Layer]((layerSizes.length - 1) * 2)
-    for(i <- 0 until layerSizes.length - 1){
+    for(i <- 0 until layerSizes.length - 1) {
       layers(i * 2) = new AffineLayer(layerSizes(i), layerSizes(i + 1))
       layers(i * 2 + 1) =
         if (softmax && i == layerSizes.length - 2) {
@@ -655,7 +655,7 @@ private[ann] object FeedForwardModel {
     val layers = topology.layers
     val layerModels = new Array[LayerModel](layers.length)
     var offset = 0
-    for(i <- 0 until layers.length){
+    for(i <- 0 until layers.length) {
       layerModels(i) = layers(i).getInstance(seed)
       offset += layerModels(i).size
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
index 718f49d..483ef0d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
@@ -145,7 +145,7 @@ class NaiveBayesModel private[ml] (
     case Multinomial => (None, None)
     case Bernoulli =>
       val negTheta = theta.map(value => math.log(1.0 - math.exp(value)))
-      val ones = new DenseVector(Array.fill(theta.numCols){1.0})
+      val ones = new DenseVector(Array.fill(theta.numCols) {1.0})
       val thetaMinusNegTheta = theta.map { value =>
         value - math.log(1.0 - math.exp(value))
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index 886cd60..132dc17 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -428,7 +428,7 @@ private[python] class PythonMLLibAPI extends Serializable {
       val weight = wt.toArray
       val mean = mu.map(_.asInstanceOf[DenseVector])
       val sigma = si.map(_.asInstanceOf[DenseMatrix])
-      val gaussians = Array.tabulate(weight.length){
+      val gaussians = Array.tabulate(weight.length) {
         i => new MultivariateGaussian(mean(i), sigma(i))
       }
       val model = new GaussianMixtureModel(weight, gaussians)

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala b/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
index aef9ef2..9026b97 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
@@ -74,7 +74,7 @@ class NaiveBayesModel private[spark] (
     case Multinomial => (None, None)
     case Bernoulli =>
       val negTheta = thetaMatrix.map(value => math.log(1.0 - math.exp(value)))
-      val ones = new DenseVector(Array.fill(thetaMatrix.numCols){1.0})
+      val ones = new DenseVector(Array.fill(thetaMatrix.numCols) {1.0})
       val thetaMinusNegTheta = thetaMatrix.map { value =>
         value - math.log(1.0 - math.exp(value))
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala
index 6dd541e..77bd0aa 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/MultilabelMetrics.scala
@@ -152,7 +152,7 @@ class MultilabelMetrics @Since("1.2.0") (predictionAndLabels: RDD[(Array[Double]
    */
   @Since("1.2.0")
   lazy val microPrecision: Double = {
-    val sumFp = fpPerClass.foldLeft(0L){ case(cum, (_, fp)) => cum + fp}
+    val sumFp = fpPerClass.foldLeft(0L) { case(cum, (_, fp)) => cum + fp}
     sumTp.toDouble / (sumTp + sumFp)
   }
 
@@ -162,7 +162,7 @@ class MultilabelMetrics @Since("1.2.0") (predictionAndLabels: RDD[(Array[Double]
    */
   @Since("1.2.0")
   lazy val microRecall: Double = {
-    val sumFn = fnPerClass.foldLeft(0.0){ case(cum, (_, fn)) => cum + fn}
+    val sumFn = fnPerClass.foldLeft(0.0) { case(cum, (_, fn)) => cum + fn}
     sumTp.toDouble / (sumTp + sumFn)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
index d2687dc..27a7380 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
@@ -420,7 +420,7 @@ private[spark] object BLAS extends Serializable with Logging {
     val AcolPtrs = A.colPtrs
 
     // Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
-    if (A.isTransposed){
+    if (A.isTransposed) {
       var colCounterForB = 0
       if (!B.isTransposed) { // Expensive to put the check inside the loop
         while (colCounterForB < nB) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
index f235089..abdd798 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
@@ -136,7 +136,7 @@ class IsotonicRegressionModel @Since("1.3.0") (
     // higher than all values, in between two values or exact match.
     if (insertIndex == 0) {
       predictions.head
-    } else if (insertIndex == boundaries.length){
+    } else if (insertIndex == boundaries.length) {
       predictions.last
     } else if (foundIndex < 0) {
       linearInterpolation(

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/main/scala/org/apache/spark/mllib/util/NumericParser.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/NumericParser.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/NumericParser.scala
index a841c5c..2c61334 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/NumericParser.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/NumericParser.scala
@@ -98,7 +98,7 @@ private[mllib] object NumericParser {
         }
       } else if (token == ")") {
         parsing = false
-      } else if (token.trim.isEmpty){
+      } else if (token.trim.isEmpty) {
           // ignore whitespaces between delim chars, e.g. ", ["
       } else {
         // expecting a number

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/test/scala/org/apache/spark/ml/classification/RandomForestClassifierSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/RandomForestClassifierSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/RandomForestClassifierSuite.scala
index 6b810ab..4c7c567 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/classification/RandomForestClassifierSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/classification/RandomForestClassifierSuite.scala
@@ -105,7 +105,7 @@ class RandomForestClassifierSuite extends SparkFunSuite with MLlibTestSparkConte
     compareAPIs(rdd, rf, categoricalFeatures, numClasses)
   }
 
-  test("subsampling rate in RandomForest"){
+  test("subsampling rate in RandomForest") {
     val rdd = orderedLabeledPoints5_20
     val categoricalFeatures = Map.empty[Int, Int]
     val numClasses = 2

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/test/scala/org/apache/spark/mllib/clustering/GaussianMixtureSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/GaussianMixtureSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/GaussianMixtureSuite.scala
index fb3bd3f..67e680b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/GaussianMixtureSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/GaussianMixtureSuite.scala
@@ -182,7 +182,7 @@ class GaussianMixtureSuite extends SparkFunSuite with MLlibTestSparkContext {
       Vectors.dense( 4.5605), Vectors.dense( 5.2043), Vectors.dense( 6.2734)
     )
 
-    val data2: Array[Vector] = Array.tabulate(25){ i: Int =>
+    val data2: Array[Vector] = Array.tabulate(25) { i: Int =>
       Vectors.dense(Array.tabulate(50)(i + _.toDouble))
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/mllib/src/test/scala/org/apache/spark/mllib/tree/RandomForestSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/tree/RandomForestSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/tree/RandomForestSuite.scala
index e6df5d9..c72fc9b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/tree/RandomForestSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/tree/RandomForestSuite.scala
@@ -197,7 +197,7 @@ class RandomForestSuite extends SparkFunSuite with MLlibTestSparkContext {
       featureSubsetStrategy = "sqrt", seed = 12345)
   }
 
-  test("subsampling rate in RandomForest"){
+  test("subsampling rate in RandomForest") {
     val arr = EnsembleTestHelper.generateOrderedLabeledPoints(5, 20)
     val rdd = sc.parallelize(arr)
     val strategy = new Strategy(algo = Classification, impurity = Gini, maxDepth = 2,

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
----------------------------------------------------------------------
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
index 22749c4..2a8fa45 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
@@ -169,7 +169,7 @@ class SparkILoop(
   }
 
 
-  private def sparkCleanUp(){
+  private def sparkCleanUp() {
     echo("Stopping spark context.")
     intp.beQuietDuring {
       command("sc.stop()")

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/scalastyle-config.xml
----------------------------------------------------------------------
diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index 64619d2..37d2ecf 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -215,6 +215,14 @@ This file is divided into 3 sections:
     </parameters>
   </check>
 
+  <!-- SPARK-3854: Single Space between ')' and '{' -->
+  <check customId="SingleSpaceBetweenRParenAndLCurlyBrace" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
+    <parameters><parameter name="regex">\)\{</parameter></parameters>
+    <customMessage><![CDATA[
+      Single Space between ')' and `{`.
+    ]]></customMessage>
+  </check>
+
   <!-- ================================================================================ -->
   <!--       rules we'd like to enforce, but haven't cleaned up the codebase yet        -->
   <!-- ================================================================================ -->

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
index 87e4342..efd7529 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
@@ -159,7 +159,7 @@ case class CreateNamedStruct(children: Seq[Expression]) extends Expression {
         TypeCheckResult.TypeCheckFailure(
           s"Only foldable StringType expressions are allowed to appear at odd position , got :" +
             s" ${invalidNames.mkString(",")}")
-      } else if (!names.contains(null)){
+      } else if (!names.contains(null)) {
         TypeCheckResult.TypeCheckSuccess
       } else {
         TypeCheckResult.TypeCheckFailure("Field name should not be null")

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
index bc2df0f..12fcc40 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
@@ -806,14 +806,14 @@ case class Round(child: Expression, scale: Expression)
       case FloatType => // if child eval to NaN or Infinity, just return it.
         if (_scale == 0) {
           s"""
-            if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})){
+            if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})) {
               ${ev.value} = ${ce.value};
             } else {
               ${ev.value} = Math.round(${ce.value});
             }"""
         } else {
           s"""
-            if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})){
+            if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})) {
               ${ev.value} = ${ce.value};
             } else {
               ${ev.value} = java.math.BigDecimal.valueOf(${ce.value}).
@@ -823,14 +823,14 @@ case class Round(child: Expression, scale: Expression)
       case DoubleType => // if child eval to NaN or Infinity, just return it.
         if (_scale == 0) {
           s"""
-            if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})){
+            if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})) {
               ${ev.value} = ${ce.value};
             } else {
               ${ev.value} = Math.round(${ce.value});
             }"""
         } else {
           s"""
-            if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})){
+            if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})) {
               ${ev.value} = ${ce.value};
             } else {
               ${ev.value} = java.math.BigDecimal.valueOf(${ce.value}).

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
index 1e71181..d9577de 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
@@ -86,7 +86,7 @@ class RowTest extends FunSpec with Matchers {
       }
     }
 
-    it("getAs() on type extending AnyVal does not throw exception when value is null"){
+    it("getAs() on type extending AnyVal does not throw exception when value is null") {
       sampleRowWithoutCol3.getAs[String](sampleRowWithoutCol3.fieldIndex("col1")) shouldBe null
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
index 97a0cde..a636d63 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
@@ -535,7 +535,7 @@ class FilterPushdownSuite extends PlanTest {
     // Filter("c" > 6)
     assertResult(classOf[Filter])(optimized.getClass)
     assertResult(1)(optimized.asInstanceOf[Filter].condition.references.size)
-    assertResult("c"){
+    assertResult("c") {
       optimized.asInstanceOf[Filter].condition.references.toSeq(0).name
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 339e61e..24f6199 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -1147,7 +1147,7 @@ class DataFrame private[sql](
    * columns of the input row are implicitly joined with each value that is output by the function.
    *
    * {{{
-   *   df.explode("words", "word"){words: String => words.split(" ")}
+   *   df.explode("words", "word") {words: String => words.split(" ")}
    * }}}
    * @group dfops
    * @since 1.3.0

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index dd1fbcf..daddf6e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -605,7 +605,7 @@ class Dataset[T] private[sql](
    * duplicate items.  As such, it is analogous to `UNION ALL` in SQL.
    * @since 1.6.0
    */
-  def union(other: Dataset[T]): Dataset[T] = withPlan[T](other){ (left, right) =>
+  def union(other: Dataset[T]): Dataset[T] = withPlan[T](other) { (left, right) =>
     // This breaks caching, but it's usually ok because it addresses a very specific use case:
     // using union to union many files or partitions.
     CombineUnions(Union(left, right))

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
index 032ba61..41cff07 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
@@ -57,7 +57,7 @@ private[datasources] object CompressionCodecs {
    * `codec` should be a full class path
    */
   def setCodecConfiguration(conf: Configuration, codec: String): Unit = {
-    if (codec != null){
+    if (codec != null) {
       conf.set("mapreduce.output.fileoutputformat.compress", "true")
       conf.set("mapreduce.output.fileoutputformat.compress.type", CompressionType.BLOCK.toString)
       conf.set("mapreduce.output.fileoutputformat.compress.codec", codec)

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
index 49915ad..9d3cd9b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
@@ -114,7 +114,7 @@ private[sql] class ExecutionPage(parent: SQLTab) extends WebUIPage("execution")
         {metadata}
       </div>
       {planVisualizationResources}
-      <script>$(function(){{ renderPlanViz(); }})</script>
+      <script>$(function() {{ renderPlanViz(); }})</script>
     </div>
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index 34e914c..b7834d7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -145,7 +145,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
       withTempPath { dir =>
         val data = makeDecimalRDD(DecimalType(precision, scale))
         data.write.parquet(dir.getCanonicalPath)
-        readParquetFile(dir.getCanonicalPath){ df => {
+        readParquetFile(dir.getCanonicalPath) { df => {
           checkAnswer(df, data.collect().toSeq)
         }}
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
index 5e7b93d..16b2d04 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
@@ -65,7 +65,7 @@ class FiltersSuite extends SparkFunSuite with Logging {
     "")
 
   private def filterTest(name: String, filters: Seq[Expression], result: String) = {
-    test(name){
+    test(name) {
       val converted = shim.convertFilters(testTable, filters)
       if (converted != result) {
         fail(

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index 703cfff..d7c529a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -361,7 +361,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
     }
   }
 
-  test("SPARK-11522 select input_file_name from non-parquet table"){
+  test("SPARK-11522 select input_file_name from non-parquet table") {
 
     withTempDir { tempDir =>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
index 298cdc0..11a4c7d 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
@@ -230,7 +230,7 @@ class CheckpointWriter(
           // If the checkpoint file exists, back it up
           // If the backup exists as well, just delete it, otherwise rename will fail
           if (fs.exists(checkpointFile)) {
-            if (fs.exists(backupFile)){
+            if (fs.exists(backupFile)) {
               fs.delete(backupFile, true) // just in case it exists
             }
             if (!fs.rename(checkpointFile, backupFile)) {

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala b/streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala
index eb1e5b0..b1bcd06 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala
@@ -558,7 +558,7 @@ private[ui] class JsCollector {
   def toHtml: Seq[Node] = {
     val js =
       s"""
-         |$$(document).ready(function(){
+         |$$(document).ready(function() {
          |    ${preparedStatements.mkString("\n")}
          |    ${statements.mkString("\n")}
          |});""".stripMargin

http://git-wip-us.apache.org/repos/asf/spark/blob/91fed8e9/streaming/src/test/scala/org/apache/spark/streaming/receiver/RateLimiterSuite.scala
----------------------------------------------------------------------
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/receiver/RateLimiterSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/receiver/RateLimiterSuite.scala
index c6330eb..ee3817c 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/receiver/RateLimiterSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/receiver/RateLimiterSuite.scala
@@ -25,21 +25,21 @@ class RateLimiterSuite extends SparkFunSuite {
 
   test("rate limiter initializes even without a maxRate set") {
     val conf = new SparkConf()
-    val rateLimiter = new RateLimiter(conf){}
+    val rateLimiter = new RateLimiter(conf) {}
     rateLimiter.updateRate(105)
     assert(rateLimiter.getCurrentLimit == 105)
   }
 
   test("rate limiter updates when below maxRate") {
     val conf = new SparkConf().set("spark.streaming.receiver.maxRate", "110")
-    val rateLimiter = new RateLimiter(conf){}
+    val rateLimiter = new RateLimiter(conf) {}
     rateLimiter.updateRate(105)
     assert(rateLimiter.getCurrentLimit == 105)
   }
 
   test("rate limiter stays below maxRate despite large updates") {
     val conf = new SparkConf().set("spark.streaming.receiver.maxRate", "100")
-    val rateLimiter = new RateLimiter(conf){}
+    val rateLimiter = new RateLimiter(conf) {}
     rateLimiter.updateRate(105)
     assert(rateLimiter.getCurrentLimit === 100)
   }

