Posted to commits@spark.apache.org by an...@apache.org on 2016/04/07 01:03:02 UTC

spark git commit: [SPARK-14444][BUILD] Add a new scalastyle `NoScalaDoc` to prevent ScalaDoc-style multiline comments

Repository: spark
Updated Branches:
  refs/heads/master 457e58bef -> d717ae1fd


[SPARK-14444][BUILD] Add a new scalastyle `NoScalaDoc` to prevent ScalaDoc-style multiline comments

## What changes were proposed in this pull request?

According to the [Spark Code Style Guide](https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide#SparkCodeStyleGuide-Indentation), this PR adds a new scalastyle rule, `NoScalaDoc`, to prevent multiline comments written in the following ScalaDoc style.
```
/** In Spark, we don't use the ScalaDoc style so this
  * is not correct.
  */
```
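
For contrast, a minimal compilable sketch of the Javadoc-style indentation the new rule enforces (the object name and comment text are illustrative, not part of the PR):
```
/**
 * In Spark, we use Javadoc-style indentation for multiline comments,
 * so this comment passes the new `NoScalaDoc` check.
 */
object JavadocStyleExample
```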

## How was this patch tested?

Pass the Jenkins tests (including `lint-scala`).

Author: Dongjoon Hyun <do...@apache.org>

Closes #12221 from dongjoon-hyun/SPARK-14444.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d717ae1f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d717ae1f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d717ae1f

Branch: refs/heads/master
Commit: d717ae1fd74d125a9df21350a70e7c2b2d2b4786
Parents: 457e58b
Author: Dongjoon Hyun <do...@apache.org>
Authored: Wed Apr 6 16:02:55 2016 -0700
Committer: Andrew Or <an...@databricks.com>
Committed: Wed Apr 6 16:02:55 2016 -0700

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/SparkConf.scala   |  6 ++++--
 .../org/apache/spark/deploy/SparkHadoopUtil.scala | 12 ++++++------
 .../org/apache/spark/partial/BoundedDouble.scala  |  4 ++--
 .../spark/examples/DriverSubmissionTest.scala     |  6 ++++--
 .../spark/streaming/flume/FlumeInputDStream.scala |  6 ++++--
 .../stat/distribution/MultivariateGaussian.scala  | 10 ++++++----
 scalastyle-config.xml                             |  5 +++++
 .../spark/sql/catalyst/ScalaReflection.scala      | 18 +++++++++---------
 .../expressions/codegen/CodeGenerator.scala       |  8 ++++----
 .../spark/sql/catalyst/plans/QueryPlan.scala      |  6 ++++--
 .../spark/sql/RelationalGroupedDataset.scala      |  4 ++--
 .../spark/sql/execution/WholeStageCodegen.scala   |  4 ++--
 .../spark/sql/execution/ui/SparkPlanGraph.scala   |  4 ++--
 .../org/apache/spark/sql/jdbc/JdbcDialects.scala  | 10 +++++-----
 14 files changed, 59 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/core/src/main/scala/org/apache/spark/SparkConf.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 5da2e98..e0fd248 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -419,8 +419,10 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
    */
   private[spark] def getenv(name: String): String = System.getenv(name)
 
-  /** Checks for illegal or deprecated config settings. Throws an exception for the former. Not
-    * idempotent - may mutate this conf object to convert deprecated settings to supported ones. */
+  /**
+   * Checks for illegal or deprecated config settings. Throws an exception for the former. Not
+   * idempotent - may mutate this conf object to convert deprecated settings to supported ones.
+   */
   private[spark] def validateSettings() {
     if (contains("spark.local.dir")) {
       val msg = "In Spark 1.0 and later spark.local.dir will be overridden by the value set by " +

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 4e8e363..41ac308 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -76,9 +76,9 @@ class SparkHadoopUtil extends Logging {
 
 
   /**
-    * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
-    * configuration.
-    */
+   * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
+   * configuration.
+   */
   def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
     // Note: this null check is around more than just access to the "conf" object to maintain
     // the behavior of the old implementation of this code, for backwards compatibility.
@@ -108,9 +108,9 @@ class SparkHadoopUtil extends Logging {
   }
 
   /**
-    * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
-    * subsystems.
-    */
+   * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
+   * subsystems.
+   */
   def newConfiguration(conf: SparkConf): Configuration = {
     val hadoopConf = new Configuration()
     appendS3AndSparkHadoopConfigurations(conf, hadoopConf)

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
index d06b2c6..c562c70 100644
--- a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
+++ b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
@@ -28,8 +28,8 @@ class BoundedDouble(val mean: Double, val confidence: Double, val low: Double, v
     this.mean.hashCode ^ this.confidence.hashCode ^ this.low.hashCode ^ this.high.hashCode
 
   /**
-    * Note that consistent with Double, any NaN value will make equality false
-    */
+   * Note that consistent with Double, any NaN value will make equality false
+   */
   override def equals(that: Any): Boolean =
     that match {
       case that: BoundedDouble => {

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
index a2d59a1..d12ef64 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
@@ -22,8 +22,10 @@ import scala.collection.JavaConverters._
 
 import org.apache.spark.util.Utils
 
-/** Prints out environmental information, sleeps, and then exits. Made to
-  * test driver submission in the standalone scheduler. */
+/**
+ * Prints out environmental information, sleeps, and then exits. Made to
+ * test driver submission in the standalone scheduler.
+ */
 object DriverSubmissionTest {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
----------------------------------------------------------------------
diff --git a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
index 6e7c3f3..13aa817 100644
--- a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
+++ b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
@@ -130,8 +130,10 @@ class FlumeEventServer(receiver: FlumeReceiver) extends AvroSourceProtocol {
   }
 }
 
-/** A NetworkReceiver which listens for events using the
-  * Flume Avro interface. */
+/**
+ * A NetworkReceiver which listens for events using the
+ * Flume Avro interface.
+ */
 private[streaming]
 class FlumeReceiver(
     host: String,

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
index 052b5b1..6c6e9fb 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
@@ -61,15 +61,17 @@ class MultivariateGaussian @Since("1.3.0") (
    */
   private val (rootSigmaInv: DBM[Double], u: Double) = calculateCovarianceConstants
 
-  /** Returns density of this multivariate Gaussian at given point, x
-    */
+  /**
+   * Returns density of this multivariate Gaussian at given point, x
+   */
    @Since("1.3.0")
   def pdf(x: Vector): Double = {
     pdf(x.toBreeze)
   }
 
-  /** Returns the log-density of this multivariate Gaussian at given point, x
-    */
+  /**
+   * Returns the log-density of this multivariate Gaussian at given point, x
+   */
    @Since("1.3.0")
   def logpdf(x: Vector): Double = {
     logpdf(x.toBreeze)

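For reference, the `pdf` and `logpdf` methods touched above evaluate the standard multivariate normal density; with mean vector \mu, covariance matrix \Sigma, and dimension k (this formula is background context, not part of the diff):

```
p(x) = (2\pi)^{-k/2} \, \det(\Sigma)^{-1/2}
       \exp\!\left(-\tfrac{1}{2}(x-\mu)^{\top}\Sigma^{-1}(x-\mu)\right)

\log p(x) = -\tfrac{k}{2}\log(2\pi) - \tfrac{1}{2}\log\det(\Sigma)
            - \tfrac{1}{2}(x-\mu)^{\top}\Sigma^{-1}(x-\mu)
```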
http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/scalastyle-config.xml
----------------------------------------------------------------------
diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index 33c2cbd..472a8f4 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -223,6 +223,11 @@ This file is divided into 3 sections:
     ]]></customMessage>
   </check>
 
+  <check customId="NoScalaDoc" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
+    <parameters><parameter name="regex">(?m)^(\s*)/[*][*].*$(\r|)\n^\1  [*]</parameter></parameters>
+    <customMessage>Use Javadoc style indentation for multiline comments</customMessage>
+  </check>
+
   <!-- ================================================================================ -->
   <!--       rules we'd like to enforce, but haven't cleaned up the codebase yet        -->
   <!-- ================================================================================ -->

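To make the new check concrete, here is a small self-contained Scala sketch (the object name and sample comments are illustrative, not part of the PR) that applies the `NoScalaDoc` regex from the diff above to both comment styles:

```
object NoScalaDocCheckDemo {
  // The NoScalaDoc regex from scalastyle-config.xml, copied verbatim.
  // It captures the indentation of the `/**` line, then flags a following
  // line indented by that indentation plus exactly two spaces before the
  // `*`, i.e. the ScalaDoc alignment style.
  private val noScalaDoc = """(?m)^(\s*)/[*][*].*$(\r|)\n^\1  [*]""".r

  private val scalaDocStyle =
    "/** In Spark, we don't use the ScalaDoc style so this\n" +
    "  * is not correct.\n" +
    "  */"

  private val javadocStyle =
    "/**\n" +
    " * Javadoc-style indentation passes the check.\n" +
    " */"

  def main(args: Array[String]): Unit = {
    // true: the ScalaDoc-aligned comment is flagged
    println(noScalaDoc.findFirstIn(scalaDocStyle).isDefined)
    // false: the Javadoc-aligned comment is allowed
    println(noScalaDoc.findFirstIn(javadocStyle).isDefined)
  }
}
```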
http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index d241b8a..4795fc2 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -762,15 +762,15 @@ trait ScalaReflection {
   }
 
   /**
-    * Returns the full class name for a type. The returned name is the canonical
-    * Scala name, where each component is separated by a period. It is NOT the
-    * Java-equivalent runtime name (no dollar signs).
-    *
-    * In simple cases, both the Scala and Java names are the same, however when Scala
-    * generates constructs that do not map to a Java equivalent, such as singleton objects
-    * or nested classes in package objects, it uses the dollar sign ($) to create
-    * synthetic classes, emulating behaviour in Java bytecode.
-    */
+   * Returns the full class name for a type. The returned name is the canonical
+   * Scala name, where each component is separated by a period. It is NOT the
+   * Java-equivalent runtime name (no dollar signs).
+   *
+   * In simple cases, both the Scala and Java names are the same, however when Scala
+   * generates constructs that do not map to a Java equivalent, such as singleton objects
+   * or nested classes in package objects, it uses the dollar sign ($) to create
+   * synthetic classes, emulating behaviour in Java bytecode.
+   */
   def getClassNameFromType(tpe: `Type`): String = {
     tpe.erasure.typeSymbol.asClass.fullName
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index 1bebd4e..ee7f4fa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -626,15 +626,15 @@ abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Loggin
 
 object CodeGenerator extends Logging {
   /**
-    * Compile the Java source code into a Java class, using Janino.
-    */
+   * Compile the Java source code into a Java class, using Janino.
+   */
   def compile(code: String): GeneratedClass = {
     cache.get(code)
   }
 
   /**
-    * Compile the Java source code into a Java class, using Janino.
-    */
+   * Compile the Java source code into a Java class, using Janino.
+   */
   private[this] def doCompile(code: String): GeneratedClass = {
     val evaluator = new ClassBodyEvaluator()
     evaluator.setParentClassLoader(Utils.getContextOrSparkClassLoader)

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 609a33e..0a11574 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -211,8 +211,10 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT
     if (changed) makeCopy(newArgs).asInstanceOf[this.type] else this
   }
 
-  /** Returns the result of running [[transformExpressions]] on this node
-    * and all its children. */
+  /**
+   * Returns the result of running [[transformExpressions]] on this node
+   * and all its children.
+   */
   def transformAllExpressions(rule: PartialFunction[Expression, Expression]): this.type = {
     transform {
       case q: QueryPlan[_] => q.transformExpressions(rule).asInstanceOf[PlanType]

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
index 91c0205..7dbf2e6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
@@ -408,7 +408,7 @@ private[sql] object RelationalGroupedDataset {
   private[sql] object RollupType extends GroupType
 
   /**
-    * To indicate it's the PIVOT
-    */
+   * To indicate it's the PIVOT
+   */
   private[sql] case class PivotType(pivotCol: Expression, values: Seq[Literal]) extends GroupType
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
index 98129d6..c4594f0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
@@ -312,8 +312,8 @@ case class WholeStageCodegen(child: SparkPlan) extends UnaryNode with CodegenSup
       }
 
       /** Codegened pipeline for:
-        * ${toCommentSafeString(child.treeString.trim)}
-        */
+       * ${toCommentSafeString(child.treeString.trim)}
+       */
       final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
 
         private Object[] references;

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
index 012b125..c6fcb69 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
@@ -167,8 +167,8 @@ private[ui] class SparkPlanGraphNode(
 }
 
 /**
-  * Represent a tree of SparkPlan for WholeStageCodegen.
-  */
+ * Represent a tree of SparkPlan for WholeStageCodegen.
+ */
 private[ui] class SparkPlanGraphCluster(
     id: Long,
     name: String,

http://git-wip-us.apache.org/repos/asf/spark/blob/d717ae1f/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index cfe4911..948106f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -100,11 +100,11 @@ abstract class JdbcDialect extends Serializable {
   }
 
   /**
-    * Override connection specific properties to run before a select is made.  This is in place to
-    * allow dialects that need special treatment to optimize behavior.
-    * @param connection The connection object
-    * @param properties The connection properties.  This is passed through from the relation.
-    */
+   * Override connection specific properties to run before a select is made.  This is in place to
+   * allow dialects that need special treatment to optimize behavior.
+   * @param connection The connection object
+   * @param properties The connection properties.  This is passed through from the relation.
+   */
   def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
   }
 

