Posted to commits@spark.apache.org by me...@apache.org on 2015/05/04 20:29:05 UTC

[2/3] spark git commit: [SPARK-5956] [MLLIB] Pipeline components should be copyable.

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
index 9db3b29..3d78537 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
@@ -34,18 +34,17 @@ import org.apache.spark.util.collection.OpenHashMap
 private[feature] trait StringIndexerBase extends Params with HasInputCol with HasOutputCol {
 
   /** Validates and transforms the input schema. */
-  protected def validateAndTransformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
-    val inputColName = map(inputCol)
+  protected def validateAndTransformSchema(schema: StructType): StructType = {
+    val inputColName = $(inputCol)
     val inputDataType = schema(inputColName).dataType
     require(inputDataType == StringType || inputDataType.isInstanceOf[NumericType],
       s"The input column $inputColName must be either string type or numeric type, " +
         s"but got $inputDataType.")
     val inputFields = schema.fields
-    val outputColName = map(outputCol)
+    val outputColName = $(outputCol)
     require(inputFields.forall(_.name != outputColName),
       s"Output column $outputColName already exists.")
-    val attr = NominalAttribute.defaultAttr.withName(map(outputCol))
+    val attr = NominalAttribute.defaultAttr.withName($(outputCol))
     val outputFields = inputFields :+ attr.toStructField()
     StructType(outputFields)
   }
@@ -69,19 +68,16 @@ class StringIndexer extends Estimator[StringIndexerModel] with StringIndexerBase
 
   // TODO: handle unseen labels
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): StringIndexerModel = {
-    val map = extractParamMap(paramMap)
-    val counts = dataset.select(col(map(inputCol)).cast(StringType))
+  override def fit(dataset: DataFrame): StringIndexerModel = {
+    val counts = dataset.select(col($(inputCol)).cast(StringType))
       .map(_.getString(0))
       .countByValue()
     val labels = counts.toSeq.sortBy(-_._2).map(_._1).toArray
-    val model = new StringIndexerModel(this, map, labels)
-    Params.inheritValues(map, this, model)
-    model
+    copyValues(new StringIndexerModel(this, labels))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }
 
@@ -92,7 +88,6 @@ class StringIndexer extends Estimator[StringIndexerModel] with StringIndexerBase
 @AlphaComponent
 class StringIndexerModel private[ml] (
     override val parent: StringIndexer,
-    override val fittingParamMap: ParamMap,
     labels: Array[String]) extends Model[StringIndexerModel] with StringIndexerBase {
 
   private val labelToIndex: OpenHashMap[String, Double] = {
@@ -112,8 +107,7 @@ class StringIndexerModel private[ml] (
   /** @group setParam */
   def setOutputCol(value: String): this.type = set(outputCol, value)
 
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
-    val map = extractParamMap(paramMap)
+  override def transform(dataset: DataFrame): DataFrame = {
     val indexer = udf { label: String =>
       if (labelToIndex.contains(label)) {
         labelToIndex(label)
@@ -122,14 +116,14 @@ class StringIndexerModel private[ml] (
         throw new SparkException(s"Unseen label: $label.")
       }
     }
-    val outputColName = map(outputCol)
+    val outputColName = $(outputCol)
     val metadata = NominalAttribute.defaultAttr
       .withName(outputColName).withValues(labels).toMetadata()
     dataset.select(col("*"),
-      indexer(dataset(map(inputCol)).cast(StringType)).as(outputColName, metadata))
+      indexer(dataset($(inputCol)).cast(StringType)).as(outputColName, metadata))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }

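For reference, a minimal sketch of the resulting paramMap-free usage (the DataFrame `df` and the column names are illustrative assumptions, not part of the patch):

    import org.apache.spark.ml.feature.StringIndexer

    // df is assumed to contain a string (or numeric) column "category".
    val indexer = new StringIndexer()
      .setInputCol("category")
      .setOutputCol("categoryIndex")
    val model = indexer.fit(df)          // fit(dataset): no ParamMap argument
    val indexed = model.transform(df)    // inputCol/outputCol were copied onto the model by copyValues
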
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala
index 01752ba..2863b76 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala
@@ -20,7 +20,7 @@ package org.apache.spark.ml.feature
 import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.UnaryTransformer
 import org.apache.spark.ml.param._
-import org.apache.spark.sql.types.{DataType, StringType, ArrayType}
+import org.apache.spark.sql.types.{ArrayType, DataType, StringType}
 
 /**
  * :: AlphaComponent ::
@@ -29,7 +29,7 @@ import org.apache.spark.sql.types.{DataType, StringType, ArrayType}
 @AlphaComponent
 class Tokenizer extends UnaryTransformer[String, Seq[String], Tokenizer] {
 
-  override protected def createTransformFunc(paramMap: ParamMap): String => Seq[String] = {
+  override protected def createTransformFunc: String => Seq[String] = {
     _.toLowerCase.split("\\s")
   }
 
@@ -62,7 +62,7 @@ class RegexTokenizer extends UnaryTransformer[String, Seq[String], RegexTokenize
   def setMinTokenLength(value: Int): this.type = set(minTokenLength, value)
 
   /** @group getParam */
-  def getMinTokenLength: Int = getOrDefault(minTokenLength)
+  def getMinTokenLength: Int = $(minTokenLength)
 
   /**
    * Indicates whether regex splits on gaps (true) or matching tokens (false).
@@ -75,7 +75,7 @@ class RegexTokenizer extends UnaryTransformer[String, Seq[String], RegexTokenize
   def setGaps(value: Boolean): this.type = set(gaps, value)
 
   /** @group getParam */
-  def getGaps: Boolean = getOrDefault(gaps)
+  def getGaps: Boolean = $(gaps)
 
   /**
    * Regex pattern used by tokenizer.
@@ -88,14 +88,14 @@ class RegexTokenizer extends UnaryTransformer[String, Seq[String], RegexTokenize
   def setPattern(value: String): this.type = set(pattern, value)
 
   /** @group getParam */
-  def getPattern: String = getOrDefault(pattern)
+  def getPattern: String = $(pattern)
 
   setDefault(minTokenLength -> 1, gaps -> false, pattern -> "\\p{L}+|[^\\p{L}\\s]+")
 
-  override protected def createTransformFunc(paramMap: ParamMap): String => Seq[String] = { str =>
-    val re = paramMap(pattern).r
-    val tokens = if (paramMap(gaps)) re.split(str).toSeq else re.findAllIn(str).toSeq
-    val minLength = paramMap(minTokenLength)
+  override protected def createTransformFunc: String => Seq[String] = { str =>
+    val re = $(pattern).r
+    val tokens = if ($(gaps)) re.split(str).toSeq else re.findAllIn(str).toSeq
+    val minLength = $(minTokenLength)
     tokens.filter(_.length >= minLength)
   }
 

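createTransformFunc is now a parameterless method that reads its settings through $(...), so a tokenizer is configured entirely through setters before transform is called. A hedged usage sketch (`df` and the column names are assumptions):

    import org.apache.spark.ml.feature.RegexTokenizer

    // df is assumed to contain a string column "text".
    val tokenizer = new RegexTokenizer()
      .setInputCol("text")
      .setOutputCol("words")
      .setPattern("\\w+")          // with gaps = false (the default), the regex matches tokens
      .setMinTokenLength(2)        // tokens shorter than 2 characters are dropped
    val tokenized = tokenizer.transform(df)   // transform(dataset): no ParamMap argument
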
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
index 5e781a3..8f2e62a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
@@ -22,7 +22,6 @@ import scala.collection.mutable.ArrayBuilder
 import org.apache.spark.SparkException
 import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.Transformer
-import org.apache.spark.ml.param.ParamMap
 import org.apache.spark.ml.param.shared._
 import org.apache.spark.mllib.linalg.{Vector, VectorUDT, Vectors}
 import org.apache.spark.sql.{DataFrame, Row}
@@ -42,13 +41,12 @@ class VectorAssembler extends Transformer with HasInputCols with HasOutputCol {
   /** @group setParam */
   def setOutputCol(value: String): this.type = set(outputCol, value)
 
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
-    val map = extractParamMap(paramMap)
+  override def transform(dataset: DataFrame): DataFrame = {
     val assembleFunc = udf { r: Row =>
       VectorAssembler.assemble(r.toSeq: _*)
     }
     val schema = dataset.schema
-    val inputColNames = map(inputCols)
+    val inputColNames = $(inputCols)
     val args = inputColNames.map { c =>
       schema(c).dataType match {
         case DoubleType => dataset(c)
@@ -56,13 +54,12 @@ class VectorAssembler extends Transformer with HasInputCols with HasOutputCol {
         case _: NumericType | BooleanType => dataset(c).cast(DoubleType).as(s"${c}_double_$uid")
       }
     }
-    dataset.select(col("*"), assembleFunc(struct(args : _*)).as(map(outputCol)))
+    dataset.select(col("*"), assembleFunc(struct(args : _*)).as($(outputCol)))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
-    val inputColNames = map(inputCols)
-    val outputColName = map(outputCol)
+  override def transformSchema(schema: StructType): StructType = {
+    val inputColNames = $(inputCols)
+    val outputColName = $(outputCol)
     val inputDataTypes = inputColNames.map(name => schema(name).dataType)
     inputDataTypes.foreach {
       case _: NumericType | BooleanType =>

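A usage sketch under the same convention (`df` and the column names are assumptions); numeric and boolean inputs are cast to double before assembly, as in the transform above:

    import org.apache.spark.ml.feature.VectorAssembler

    // df is assumed to contain numeric columns "age" and "income" and a boolean column "clicked".
    val assembler = new VectorAssembler()
      .setInputCols(Array("age", "income", "clicked"))
      .setOutputCol("features")
    val assembled = assembler.transform(df)   // appends a Vector column named $(outputCol)
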
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
index ed833c6..07ea579 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorIndexer.scala
@@ -18,19 +18,17 @@
 package org.apache.spark.ml.feature
 
 import org.apache.spark.annotation.AlphaComponent
-import org.apache.spark.ml.util.SchemaUtils
 import org.apache.spark.ml.{Estimator, Model}
-import org.apache.spark.ml.attribute.{BinaryAttribute, NumericAttribute, NominalAttribute,
-  Attribute, AttributeGroup}
-import org.apache.spark.ml.param.{ParamValidators, IntParam, ParamMap, Params}
+import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, BinaryAttribute, NominalAttribute, NumericAttribute}
+import org.apache.spark.ml.param.{IntParam, ParamValidators, Params}
 import org.apache.spark.ml.param.shared._
-import org.apache.spark.mllib.linalg.{SparseVector, DenseVector, Vector, VectorUDT}
-import org.apache.spark.sql.{Row, DataFrame}
+import org.apache.spark.ml.util.SchemaUtils
+import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, VectorUDT}
+import org.apache.spark.sql.{DataFrame, Row}
 import org.apache.spark.sql.functions.callUDF
 import org.apache.spark.sql.types.{StructField, StructType}
 import org.apache.spark.util.collection.OpenHashSet
 
-
 /** Private trait for params for VectorIndexer and VectorIndexerModel */
 private[ml] trait VectorIndexerParams extends Params with HasInputCol with HasOutputCol {
 
@@ -49,7 +47,7 @@ private[ml] trait VectorIndexerParams extends Params with HasInputCol with HasOu
   setDefault(maxCategories -> 20)
 
   /** @group getParam */
-  def getMaxCategories: Int = getOrDefault(maxCategories)
+  def getMaxCategories: Int = $(maxCategories)
 }
 
 /**
@@ -100,33 +98,29 @@ class VectorIndexer extends Estimator[VectorIndexerModel] with VectorIndexerPara
   /** @group setParam */
   def setOutputCol(value: String): this.type = set(outputCol, value)
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): VectorIndexerModel = {
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
-    val firstRow = dataset.select(map(inputCol)).take(1)
+  override def fit(dataset: DataFrame): VectorIndexerModel = {
+    transformSchema(dataset.schema, logging = true)
+    val firstRow = dataset.select($(inputCol)).take(1)
     require(firstRow.length == 1, s"VectorIndexer cannot be fit on an empty dataset.")
     val numFeatures = firstRow(0).getAs[Vector](0).size
-    val vectorDataset = dataset.select(map(inputCol)).map { case Row(v: Vector) => v }
-    val maxCats = map(maxCategories)
+    val vectorDataset = dataset.select($(inputCol)).map { case Row(v: Vector) => v }
+    val maxCats = $(maxCategories)
     val categoryStats: VectorIndexer.CategoryStats = vectorDataset.mapPartitions { iter =>
       val localCatStats = new VectorIndexer.CategoryStats(numFeatures, maxCats)
       iter.foreach(localCatStats.addVector)
       Iterator(localCatStats)
     }.reduce((stats1, stats2) => stats1.merge(stats2))
-    val model = new VectorIndexerModel(this, map, numFeatures, categoryStats.getCategoryMaps)
-    Params.inheritValues(map, this, model)
-    model
+    copyValues(new VectorIndexerModel(this, numFeatures, categoryStats.getCategoryMaps))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
+  override def transformSchema(schema: StructType): StructType = {
     // We do not transfer feature metadata since we do not know what types of features we will
     // produce in transform().
-    val map = extractParamMap(paramMap)
     val dataType = new VectorUDT
-    require(map.contains(inputCol), s"VectorIndexer requires input column parameter: $inputCol")
-    require(map.contains(outputCol), s"VectorIndexer requires output column parameter: $outputCol")
-    SchemaUtils.checkColumnType(schema, map(inputCol), dataType)
-    SchemaUtils.appendColumn(schema, map(outputCol), dataType)
+    require(isDefined(inputCol), s"VectorIndexer requires input column parameter: $inputCol")
+    require(isDefined(outputCol), s"VectorIndexer requires output column parameter: $outputCol")
+    SchemaUtils.checkColumnType(schema, $(inputCol), dataType)
+    SchemaUtils.appendColumn(schema, $(outputCol), dataType)
   }
 }
 
@@ -243,7 +237,6 @@ private object VectorIndexer {
 @AlphaComponent
 class VectorIndexerModel private[ml] (
     override val parent: VectorIndexer,
-    override val fittingParamMap: ParamMap,
     val numFeatures: Int,
     val categoryMaps: Map[Int, Map[Double, Int]])
   extends Model[VectorIndexerModel] with VectorIndexerParams {
@@ -326,35 +319,33 @@ class VectorIndexerModel private[ml] (
   /** @group setParam */
   def setOutputCol(value: String): this.type = set(outputCol, value)
 
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
-    val newField = prepOutputField(dataset.schema, map)
-    val newCol = callUDF(transformFunc, new VectorUDT, dataset(map(inputCol)))
-    dataset.withColumn(map(outputCol), newCol.as(map(outputCol), newField.metadata))
+  override def transform(dataset: DataFrame): DataFrame = {
+    transformSchema(dataset.schema, logging = true)
+    val newField = prepOutputField(dataset.schema)
+    val newCol = callUDF(transformFunc, new VectorUDT, dataset($(inputCol)))
+    dataset.withColumn($(outputCol), newCol.as($(outputCol), newField.metadata))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
+  override def transformSchema(schema: StructType): StructType = {
     val dataType = new VectorUDT
-    require(map.contains(inputCol),
+    require(isDefined(inputCol),
       s"VectorIndexerModel requires input column parameter: $inputCol")
-    require(map.contains(outputCol),
+    require(isDefined(outputCol),
       s"VectorIndexerModel requires output column parameter: $outputCol")
-    SchemaUtils.checkColumnType(schema, map(inputCol), dataType)
+    SchemaUtils.checkColumnType(schema, $(inputCol), dataType)
 
     // If the input metadata specifies numFeatures, compare with expected numFeatures.
-    val origAttrGroup = AttributeGroup.fromStructField(schema(map(inputCol)))
+    val origAttrGroup = AttributeGroup.fromStructField(schema($(inputCol)))
     val origNumFeatures: Option[Int] = if (origAttrGroup.attributes.nonEmpty) {
       Some(origAttrGroup.attributes.get.length)
     } else {
       origAttrGroup.numAttributes
     }
     require(origNumFeatures.forall(_ == numFeatures), "VectorIndexerModel expected" +
-      s" $numFeatures features, but input column ${map(inputCol)} had metadata specifying" +
+      s" $numFeatures features, but input column ${$(inputCol)} had metadata specifying" +
       s" ${origAttrGroup.numAttributes.get} features.")
 
-    val newField = prepOutputField(schema, map)
+    val newField = prepOutputField(schema)
     val outputFields = schema.fields :+ newField
     StructType(outputFields)
   }
@@ -362,11 +353,10 @@ class VectorIndexerModel private[ml] (
   /**
    * Prepare the output column field, including per-feature metadata.
    * @param schema  Input schema
-   * @param map  Parameter map (with this class' embedded parameter map folded in)
    * @return  Output column field.  This field does not contain non-ML metadata.
    */
-  private def prepOutputField(schema: StructType, map: ParamMap): StructField = {
-    val origAttrGroup = AttributeGroup.fromStructField(schema(map(inputCol)))
+  private def prepOutputField(schema: StructType): StructField = {
+    val origAttrGroup = AttributeGroup.fromStructField(schema($(inputCol)))
     val featureAttributes: Array[Attribute] = if (origAttrGroup.attributes.nonEmpty) {
       // Convert original attributes to modified attributes
       val origAttrs: Array[Attribute] = origAttrGroup.attributes.get
@@ -389,7 +379,7 @@ class VectorIndexerModel private[ml] (
     } else {
       partialFeatureAttributes
     }
-    val newAttributeGroup = new AttributeGroup(map(outputCol), featureAttributes)
+    val newAttributeGroup = new AttributeGroup($(outputCol), featureAttributes)
     newAttributeGroup.toStructField()
   }
 }

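Because transformSchema now checks isDefined(inputCol)/isDefined(outputCol) on the instance itself, both columns must be set before fit. A hedged sketch (a DataFrame `df` with a Vector column "features" is assumed):

    import org.apache.spark.ml.feature.VectorIndexer

    val indexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .setMaxCategories(10)
    val model = indexer.fit(df)          // fails fast if inputCol/outputCol are not defined
    val indexed = model.transform(df)    // per-feature metadata comes from prepOutputField(schema)
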
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
index 0163fa8..34ff929 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
@@ -18,16 +18,16 @@
 package org.apache.spark.ml.feature
 
 import org.apache.spark.annotation.AlphaComponent
+import org.apache.spark.ml.{Estimator, Model}
 import org.apache.spark.ml.param._
 import org.apache.spark.ml.param.shared._
 import org.apache.spark.ml.util.SchemaUtils
-import org.apache.spark.ml.{Estimator, Model}
 import org.apache.spark.mllib.feature
-import org.apache.spark.mllib.linalg.BLAS._
 import org.apache.spark.mllib.linalg.{VectorUDT, Vectors}
+import org.apache.spark.mllib.linalg.BLAS._
+import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.types._
-import org.apache.spark.sql.{DataFrame, Row}
 
 /**
  * Params for [[Word2Vec]] and [[Word2VecModel]].
@@ -43,7 +43,7 @@ private[feature] trait Word2VecBase extends Params
   setDefault(vectorSize -> 100)
 
   /** @group getParam */
-  def getVectorSize: Int = getOrDefault(vectorSize)
+  def getVectorSize: Int = $(vectorSize)
 
   /**
    * Number of partitions for sentences of words.
@@ -53,7 +53,7 @@ private[feature] trait Word2VecBase extends Params
   setDefault(numPartitions -> 1)
 
   /** @group getParam */
-  def getNumPartitions: Int = getOrDefault(numPartitions)
+  def getNumPartitions: Int = $(numPartitions)
 
   /**
    * The minimum number of times a token must appear to be included in the word2vec model's
@@ -64,7 +64,7 @@ private[feature] trait Word2VecBase extends Params
   setDefault(minCount -> 5)
 
   /** @group getParam */
-  def getMinCount: Int = getOrDefault(minCount)
+  def getMinCount: Int = $(minCount)
 
   setDefault(stepSize -> 0.025)
   setDefault(maxIter -> 1)
@@ -73,10 +73,9 @@ private[feature] trait Word2VecBase extends Params
   /**
    * Validate and transform the input schema.
    */
-  protected def validateAndTransformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
-    SchemaUtils.checkColumnType(schema, map(inputCol), new ArrayType(StringType, true))
-    SchemaUtils.appendColumn(schema, map(outputCol), new VectorUDT)
+  protected def validateAndTransformSchema(schema: StructType): StructType = {
+    SchemaUtils.checkColumnType(schema, $(inputCol), new ArrayType(StringType, true))
+    SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT)
   }
 }
 
@@ -112,25 +111,22 @@ final class Word2Vec extends Estimator[Word2VecModel] with Word2VecBase {
   /** @group setParam */
   def setMinCount(value: Int): this.type = set(minCount, value)
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): Word2VecModel = {
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
-    val input = dataset.select(map(inputCol)).map { case Row(v: Seq[String]) => v }
+  override def fit(dataset: DataFrame): Word2VecModel = {
+    transformSchema(dataset.schema, logging = true)
+    val input = dataset.select($(inputCol)).map(_.getAs[Seq[String]](0))
     val wordVectors = new feature.Word2Vec()
-      .setLearningRate(map(stepSize))
-      .setMinCount(map(minCount))
-      .setNumIterations(map(maxIter))
-      .setNumPartitions(map(numPartitions))
-      .setSeed(map(seed))
-      .setVectorSize(map(vectorSize))
+      .setLearningRate($(stepSize))
+      .setMinCount($(minCount))
+      .setNumIterations($(maxIter))
+      .setNumPartitions($(numPartitions))
+      .setSeed($(seed))
+      .setVectorSize($(vectorSize))
       .fit(input)
-    val model = new Word2VecModel(this, map, wordVectors)
-    Params.inheritValues(map, this, model)
-    model
+    copyValues(new Word2VecModel(this, wordVectors))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }
 
@@ -141,7 +137,6 @@ final class Word2Vec extends Estimator[Word2VecModel] with Word2VecBase {
 @AlphaComponent
 class Word2VecModel private[ml] (
     override val parent: Word2Vec,
-    override val fittingParamMap: ParamMap,
     wordVectors: feature.Word2VecModel)
   extends Model[Word2VecModel] with Word2VecBase {
 
@@ -155,15 +150,14 @@ class Word2VecModel private[ml] (
    * Transform a sentence column to a vector column to represent the whole sentence. The transform
    * is performed by averaging all word vectors it contains.
    */
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
+  override def transform(dataset: DataFrame): DataFrame = {
+    transformSchema(dataset.schema, logging = true)
     val bWordVectors = dataset.sqlContext.sparkContext.broadcast(wordVectors)
     val word2Vec = udf { sentence: Seq[String] =>
       if (sentence.size == 0) {
-        Vectors.sparse(map(vectorSize), Array.empty[Int], Array.empty[Double])
+        Vectors.sparse($(vectorSize), Array.empty[Int], Array.empty[Double])
       } else {
-        val cum = Vectors.zeros(map(vectorSize))
+        val cum = Vectors.zeros($(vectorSize))
         val model = bWordVectors.value.getVectors
         for (word <- sentence) {
           if (model.contains(word)) {
@@ -176,10 +170,10 @@ class Word2VecModel private[ml] (
         cum
       }
     }
-    dataset.withColumn(map(outputCol), word2Vec(col(map(inputCol))))
+    dataset.withColumn($(outputCol), word2Vec(col($(inputCol))))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }

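A usage sketch under the new API (a DataFrame `df` with a Seq[String] column "words" is an assumption):

    import org.apache.spark.ml.feature.Word2Vec

    val word2Vec = new Word2Vec()
      .setInputCol("words")
      .setOutputCol("wordVectors")
      .setVectorSize(50)
      .setMinCount(1)
    val model = word2Vec.fit(df)             // params flow to the model via copyValues
    val vectors = model.transform(df)        // averages the word vectors contained in each row
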
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/impl/estimator/Predictor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/impl/estimator/Predictor.scala b/mllib/src/main/scala/org/apache/spark/ml/impl/estimator/Predictor.scala
index 195333a..e8b3628 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/impl/estimator/Predictor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/impl/estimator/Predictor.scala
@@ -18,18 +18,17 @@
 package org.apache.spark.ml.impl.estimator
 
 import org.apache.spark.annotation.{AlphaComponent, DeveloperApi}
-import org.apache.spark.ml.util.SchemaUtils
 import org.apache.spark.ml.{Estimator, Model}
 import org.apache.spark.ml.param._
 import org.apache.spark.ml.param.shared._
-import org.apache.spark.mllib.linalg.{VectorUDT, Vector}
+import org.apache.spark.ml.util.SchemaUtils
+import org.apache.spark.mllib.linalg.{Vector, VectorUDT}
 import org.apache.spark.mllib.regression.LabeledPoint
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.{DataFrame, Row}
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.types.{DataType, DoubleType, StructType}
 
-
 /**
  * :: DeveloperApi ::
  *
@@ -44,7 +43,6 @@ private[spark] trait PredictorParams extends Params
   /**
    * Validates and transforms the input schema with the provided param map.
    * @param schema input schema
-   * @param paramMap additional parameters
    * @param fitting whether this is in fitting
    * @param featuresDataType  SQL DataType for FeaturesType.
    *                          E.g., [[org.apache.spark.mllib.linalg.VectorUDT]] for vector features.
@@ -52,17 +50,15 @@ private[spark] trait PredictorParams extends Params
    */
   protected def validateAndTransformSchema(
       schema: StructType,
-      paramMap: ParamMap,
       fitting: Boolean,
       featuresDataType: DataType): StructType = {
-    val map = extractParamMap(paramMap)
     // TODO: Support casting Array[Double] and Array[Float] to Vector when FeaturesType = Vector
-    SchemaUtils.checkColumnType(schema, map(featuresCol), featuresDataType)
+    SchemaUtils.checkColumnType(schema, $(featuresCol), featuresDataType)
     if (fitting) {
       // TODO: Allow other numeric types
-      SchemaUtils.checkColumnType(schema, map(labelCol), DoubleType)
+      SchemaUtils.checkColumnType(schema, $(labelCol), DoubleType)
     }
-    SchemaUtils.appendColumn(schema, map(predictionCol), DoubleType)
+    SchemaUtils.appendColumn(schema, $(predictionCol), DoubleType)
   }
 }
 
@@ -96,14 +92,15 @@ private[spark] abstract class Predictor[
   /** @group setParam */
   def setPredictionCol(value: String): Learner = set(predictionCol, value).asInstanceOf[Learner]
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): M = {
+  override def fit(dataset: DataFrame): M = {
     // This handles a few items such as schema validation.
     // Developers only need to implement train().
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
-    val model = train(dataset, map)
-    Params.inheritValues(map, this, model) // copy params to model
-    model
+    transformSchema(dataset.schema, logging = true)
+    copyValues(train(dataset))
+  }
+
+  override def copy(extra: ParamMap): Learner = {
+    super.copy(extra).asInstanceOf[Learner]
   }
 
   /**
@@ -114,12 +111,10 @@ private[spark] abstract class Predictor[
    * and copying parameters into the model.
    *
    * @param dataset  Training dataset
-   * @param paramMap  Parameter map.  Unlike [[fit()]]'s paramMap, this paramMap has already
-   *                  been combined with the embedded ParamMap.
    * @return  Fitted model
    */
   @DeveloperApi
-  protected def train(dataset: DataFrame, paramMap: ParamMap): M
+  protected def train(dataset: DataFrame): M
 
   /**
    * :: DeveloperApi ::
@@ -134,17 +129,16 @@ private[spark] abstract class Predictor[
   @DeveloperApi
   protected def featuresDataType: DataType = new VectorUDT
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap, fitting = true, featuresDataType)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema, fitting = true, featuresDataType)
   }
 
   /**
    * Extract [[labelCol]] and [[featuresCol]] from the given dataset,
    * and put it in an RDD with strong types.
    */
-  protected def extractLabeledPoints(dataset: DataFrame, paramMap: ParamMap): RDD[LabeledPoint] = {
-    val map = extractParamMap(paramMap)
-    dataset.select(map(labelCol), map(featuresCol))
+  protected def extractLabeledPoints(dataset: DataFrame): RDD[LabeledPoint] = {
+    dataset.select($(labelCol), $(featuresCol))
       .map { case Row(label: Double, features: Vector) =>
       LabeledPoint(label, features)
     }
@@ -186,8 +180,8 @@ private[spark] abstract class PredictionModel[FeaturesType, M <: PredictionModel
   @DeveloperApi
   protected def featuresDataType: DataType = new VectorUDT
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap, fitting = false, featuresDataType)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema, fitting = false, featuresDataType)
   }
 
   /**
@@ -195,30 +189,16 @@ private[spark] abstract class PredictionModel[FeaturesType, M <: PredictionModel
    * the predictions as a new column [[predictionCol]].
    *
    * @param dataset input dataset
-   * @param paramMap additional parameters, overwrite embedded params
    * @return transformed dataset with [[predictionCol]] of type [[Double]]
    */
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
+  override def transform(dataset: DataFrame): DataFrame = {
     // This default implementation should be overridden as needed.
 
     // Check schema
-    transformSchema(dataset.schema, paramMap, logging = true)
-    val map = extractParamMap(paramMap)
-
-    // Prepare model
-    val tmpModel = if (paramMap.size != 0) {
-      val tmpModel = this.copy()
-      Params.inheritValues(paramMap, parent, tmpModel)
-      tmpModel
-    } else {
-      this
-    }
+    transformSchema(dataset.schema, logging = true)
 
-    if (map(predictionCol) != "") {
-      val pred: FeaturesType => Double = (features) => {
-        tmpModel.predict(features)
-      }
-      dataset.withColumn(map(predictionCol), callUDF(pred, DoubleType, col(map(featuresCol))))
+    if ($(predictionCol) != "") {
+      dataset.withColumn($(predictionCol), callUDF(predict _, DoubleType, col($(featuresCol))))
     } else {
       this.logWarning(s"$uid: Predictor.transform() was called as NOOP" +
         " since no output columns were set.")
@@ -234,10 +214,4 @@ private[spark] abstract class PredictionModel[FeaturesType, M <: PredictionModel
    */
   @DeveloperApi
   protected def predict(features: FeaturesType): Double
-
-  /**
-   * Create a copy of the model.
-   * The copy is shallow, except for the embedded paramMap, which gets a deep copy.
-   */
-  protected def copy(): M
 }

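Concrete Predictor subclasses now implement train(dataset) and read params with $(...), while fit(dataset) handles schema validation and copies params onto the returned model. A hedged end-user sketch using an existing predictor (`training` and `test` DataFrames with "label"/"features" columns are assumptions):

    import org.apache.spark.ml.classification.LogisticRegression

    val lr = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(0.01)
    val model = lr.fit(training)             // fit(dataset): transformSchema + train + copyValues
    val predictions = model.transform(test)  // appends prediction column(s); no ParamMap argument
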
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/impl/tree/treeParams.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/impl/tree/treeParams.scala b/mllib/src/main/scala/org/apache/spark/ml/impl/tree/treeParams.scala
index fb77062..0e22562 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/impl/tree/treeParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/impl/tree/treeParams.scala
@@ -20,14 +20,11 @@ package org.apache.spark.ml.impl.tree
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.ml.impl.estimator.PredictorParams
 import org.apache.spark.ml.param._
-import org.apache.spark.ml.param.shared.{HasSeed, HasMaxIter}
-import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo,
-  BoostingStrategy => OldBoostingStrategy, Strategy => OldStrategy}
-import org.apache.spark.mllib.tree.impurity.{Gini => OldGini, Entropy => OldEntropy,
-  Impurity => OldImpurity, Variance => OldVariance}
+import org.apache.spark.ml.param.shared.{HasMaxIter, HasSeed}
+import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, BoostingStrategy => OldBoostingStrategy, Strategy => OldStrategy}
+import org.apache.spark.mllib.tree.impurity.{Entropy => OldEntropy, Gini => OldGini, Impurity => OldImpurity, Variance => OldVariance}
 import org.apache.spark.mllib.tree.loss.{Loss => OldLoss}
 
-
 /**
  * :: DeveloperApi ::
  * Parameters for Decision Tree-based algorithms.
@@ -123,43 +120,43 @@ private[ml] trait DecisionTreeParams extends PredictorParams {
   def setMaxDepth(value: Int): this.type = set(maxDepth, value)
 
   /** @group getParam */
-  final def getMaxDepth: Int = getOrDefault(maxDepth)
+  final def getMaxDepth: Int = $(maxDepth)
 
   /** @group setParam */
   def setMaxBins(value: Int): this.type = set(maxBins, value)
 
   /** @group getParam */
-  final def getMaxBins: Int = getOrDefault(maxBins)
+  final def getMaxBins: Int = $(maxBins)
 
   /** @group setParam */
   def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value)
 
   /** @group getParam */
-  final def getMinInstancesPerNode: Int = getOrDefault(minInstancesPerNode)
+  final def getMinInstancesPerNode: Int = $(minInstancesPerNode)
 
   /** @group setParam */
   def setMinInfoGain(value: Double): this.type = set(minInfoGain, value)
 
   /** @group getParam */
-  final def getMinInfoGain: Double = getOrDefault(minInfoGain)
+  final def getMinInfoGain: Double = $(minInfoGain)
 
   /** @group expertSetParam */
   def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value)
 
   /** @group expertGetParam */
-  final def getMaxMemoryInMB: Int = getOrDefault(maxMemoryInMB)
+  final def getMaxMemoryInMB: Int = $(maxMemoryInMB)
 
   /** @group expertSetParam */
   def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value)
 
   /** @group expertGetParam */
-  final def getCacheNodeIds: Boolean = getOrDefault(cacheNodeIds)
+  final def getCacheNodeIds: Boolean = $(cacheNodeIds)
 
   /** @group expertSetParam */
   def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
 
   /** @group expertGetParam */
-  final def getCheckpointInterval: Int = getOrDefault(checkpointInterval)
+  final def getCheckpointInterval: Int = $(checkpointInterval)
 
   /** (private[ml]) Create a Strategy instance to use with the old API. */
   private[ml] def getOldStrategy(
@@ -206,7 +203,7 @@ private[ml] trait TreeClassifierParams extends Params {
   def setImpurity(value: String): this.type = set(impurity, value)
 
   /** @group getParam */
-  final def getImpurity: String = getOrDefault(impurity).toLowerCase
+  final def getImpurity: String = $(impurity).toLowerCase
 
   /** Convert new impurity to old impurity. */
   private[ml] def getOldImpurity: OldImpurity = {
@@ -248,7 +245,7 @@ private[ml] trait TreeRegressorParams extends Params {
   def setImpurity(value: String): this.type = set(impurity, value)
 
   /** @group getParam */
-  final def getImpurity: String = getOrDefault(impurity).toLowerCase
+  final def getImpurity: String = $(impurity).toLowerCase
 
   /** Convert new impurity to old impurity. */
   private[ml] def getOldImpurity: OldImpurity = {
@@ -291,7 +288,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams with HasSeed {
   def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value)
 
   /** @group getParam */
-  final def getSubsamplingRate: Double = getOrDefault(subsamplingRate)
+  final def getSubsamplingRate: Double = $(subsamplingRate)
 
   /** @group setParam */
   def setSeed(value: Long): this.type = set(seed, value)
@@ -364,13 +361,13 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
   def setNumTrees(value: Int): this.type = set(numTrees, value)
 
   /** @group getParam */
-  final def getNumTrees: Int = getOrDefault(numTrees)
+  final def getNumTrees: Int = $(numTrees)
 
   /** @group setParam */
   def setFeatureSubsetStrategy(value: String): this.type = set(featureSubsetStrategy, value)
 
   /** @group getParam */
-  final def getFeatureSubsetStrategy: String = getOrDefault(featureSubsetStrategy).toLowerCase
+  final def getFeatureSubsetStrategy: String = $(featureSubsetStrategy).toLowerCase
 }
 
 private[ml] object RandomForestParams {
@@ -418,7 +415,7 @@ private[ml] trait GBTParams extends TreeEnsembleParams with HasMaxIter {
   def setStepSize(value: Double): this.type = set(stepSize, value)
 
   /** @group getParam */
-  final def getStepSize: Double = getOrDefault(stepSize)
+  final def getStepSize: Double = $(stepSize)
 
   /** (private[ml]) Create a BoostingStrategy instance to use with the old API. */
   private[ml] def getOldBoostingStrategy(

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
index df6360d..51ce19d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -23,7 +23,7 @@ import java.util.NoSuchElementException
 import scala.annotation.varargs
 import scala.collection.mutable
 
-import org.apache.spark.annotation.{AlphaComponent, DeveloperApi}
+import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.util.Identifiable
 
 /**
@@ -49,7 +49,7 @@ class Param[T] (val parent: Params, val name: String, val doc: String, val isVal
    * Assert that the given value is valid for this parameter.
    *
    * Note: Parameter checks involving interactions between multiple parameters should be
-   *       implemented in [[Params.validate()]].  Checks for input/output columns should be
+   *       implemented in [[Params.validateParams()]].  Checks for input/output columns should be
    *       implemented in [[org.apache.spark.ml.PipelineStage.transformSchema()]].
    *
    * DEVELOPERS: This method is only called by [[ParamPair]], which means that all parameters
@@ -258,7 +258,9 @@ trait Params extends Identifiable with Serializable {
    * [[Param.validate()]].  This method does not handle input/output column parameters;
    * those are checked during schema validation.
    */
-  def validate(paramMap: ParamMap): Unit = { }
+  def validateParams(paramMap: ParamMap): Unit = {
+    copy(paramMap).validateParams()
+  }
 
   /**
    * Validates parameter values stored internally.
@@ -269,7 +271,11 @@ trait Params extends Identifiable with Serializable {
    * [[Param.validate()]].  This method does not handle input/output column parameters;
    * those are checked during schema validation.
    */
-  def validate(): Unit = validate(ParamMap.empty)
+  def validateParams(): Unit = {
+    params.filter(isDefined _).foreach { param =>
+      param.asInstanceOf[Param[Any]].validate($(param))
+    }
+  }
 
   /**
    * Returns the documentation of all params.
@@ -288,6 +294,11 @@ trait Params extends Identifiable with Serializable {
     defaultParamMap.contains(param) || paramMap.contains(param)
   }
 
+  /** Tests whether this instance contains a param with a given name. */
+  def hasParam(paramName: String): Boolean = {
+    params.exists(_.name == paramName)
+  }
+
   /** Gets a param by its name. */
   def getParam(paramName: String): Param[Any] = {
     params.find(_.name == paramName).getOrElse {
@@ -337,6 +348,9 @@ trait Params extends Identifiable with Serializable {
     get(param).orElse(getDefault(param)).get
   }
 
+  /** An alias for [[getOrDefault()]]. */
+  protected final def $[T](param: Param[T]): T = getOrDefault(param)
+
   /**
    * Sets a default value for a param.
    * @param param  param to set the default value. Make sure that this param is initialized before
@@ -383,18 +397,30 @@ trait Params extends Identifiable with Serializable {
   }
 
   /**
+   * Creates a copy of this instance with a randomly generated uid and some extra params.
+   * The default implementation calls the default constructor to create a new instance, then
+   * copies the embedded and extra parameters over and returns the new instance.
+   * Subclasses should override this method if the default approach is not sufficient.
+   */
+  def copy(extra: ParamMap): Params = {
+    val that = this.getClass.newInstance()
+    copyValues(that, extra)
+    that
+  }
+
+  /**
    * Extracts the embedded default param values and user-supplied values, and then merges them with
    * extra values from input into a flat param map, where the latter value is used if there exist
    * conflicts, i.e., with ordering: default param values < user-supplied values < extraParamMap.
    */
-  protected final def extractParamMap(extraParamMap: ParamMap): ParamMap = {
+  final def extractParamMap(extraParamMap: ParamMap): ParamMap = {
     defaultParamMap ++ paramMap ++ extraParamMap
   }
 
   /**
    * [[extractParamMap]] with no extra values.
    */
-  protected final def extractParamMap(): ParamMap = {
+  final def extractParamMap(): ParamMap = {
     extractParamMap(ParamMap.empty)
   }
 
@@ -408,34 +434,21 @@ trait Params extends Identifiable with Serializable {
   private def shouldOwn(param: Param[_]): Unit = {
     require(param.parent.eq(this), s"Param $param does not belong to $this.")
   }
-}
 
-/**
- * :: DeveloperApi ::
- *
- * Helper functionality for developers.
- *
- * NOTE: This is currently private[spark] but will be made public later once it is stabilized.
- */
-@DeveloperApi
-private[spark] object Params {
-
-  /**
-   * Copies parameter values from the parent estimator to the child model it produced.
-   * @param paramMap the param map that holds parameters of the parent
-   * @param parent the parent estimator
-   * @param child the child model
-   */
-  def inheritValues[E <: Params, M <: E](
-      paramMap: ParamMap,
-      parent: E,
-      child: M): Unit = {
-    val childParams = child.params.map(_.name).toSet
-    parent.params.foreach { param =>
-      if (paramMap.contains(param) && childParams.contains(param.name)) {
-        child.set(child.getParam(param.name), paramMap(param))
+  /**
+   * Copies param values from this instance to another instance for params shared by them.
+   * @param to the target instance
+   * @param extra extra params to be copied
+   * @return the target instance with param values copied
+   */
+  protected def copyValues[T <: Params](to: T, extra: ParamMap = ParamMap.empty): T = {
+    val map = extractParamMap(extra)
+    params.foreach { param =>
+      if (map.contains(param) && to.hasParam(param.name)) {
+        to.set(param.name, map(param))
       }
     }
+    to
   }
 }
 

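The pieces above tie together as follows: $(param) is shorthand for getOrDefault, copy(extra) clones an instance with extra params folded in, and copyValues replaces the removed Params.inheritValues. A hedged sketch (LogisticRegression is used here only as a convenient Params subclass):

    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.ml.param.ParamMap

    val lr = new LogisticRegression().setMaxIter(10)
    val known = lr.hasParam("maxIter")                   // look up a param by name: true
    val copied = lr.copy(ParamMap(lr.regParam -> 0.1))   // new instance with the extra value merged in
    copied.validateParams()                              // runs Param.validate on every defined param
    val merged = copied.extractParamMap()                // defaults < user-supplied values < extras
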
http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
index 7da4bb4..d379172 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
@@ -21,8 +21,6 @@ import java.io.PrintWriter
 
 import scala.reflect.ClassTag
 
-import org.apache.spark.ml.param.ParamValidators
-
 /**
  * Code generator for shared params (sharedParams.scala). Run under the Spark folder with
  * {{{
@@ -142,7 +140,7 @@ private[shared] object SharedParamsCodeGen {
       |  final val $name: $Param = new $Param(this, "$name", "$doc"$isValid)
       |$setDefault
       |  /** @group getParam */
-      |  final def get$Name: $T = getOrDefault($name)
+      |  final def get$Name: $T = $$($name)
       |}
       |""".stripMargin
   }
@@ -169,7 +167,6 @@ private[shared] object SharedParamsCodeGen {
         |
         |package org.apache.spark.ml.param.shared
         |
-        |import org.apache.spark.annotation.DeveloperApi
         |import org.apache.spark.ml.param._
         |import org.apache.spark.util.Utils
         |

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
index e1549f4..fb1874c 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
@@ -17,7 +17,6 @@
 
 package org.apache.spark.ml.param.shared
 
-import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.ml.param._
 import org.apache.spark.util.Utils
 
@@ -37,7 +36,7 @@ private[ml] trait HasRegParam extends Params {
   final val regParam: DoubleParam = new DoubleParam(this, "regParam", "regularization parameter (>= 0)", ParamValidators.gtEq(0))
 
   /** @group getParam */
-  final def getRegParam: Double = getOrDefault(regParam)
+  final def getRegParam: Double = $(regParam)
 }
 
 /**
@@ -52,7 +51,7 @@ private[ml] trait HasMaxIter extends Params {
   final val maxIter: IntParam = new IntParam(this, "maxIter", "max number of iterations (>= 0)", ParamValidators.gtEq(0))
 
   /** @group getParam */
-  final def getMaxIter: Int = getOrDefault(maxIter)
+  final def getMaxIter: Int = $(maxIter)
 }
 
 /**
@@ -69,7 +68,7 @@ private[ml] trait HasFeaturesCol extends Params {
   setDefault(featuresCol, "features")
 
   /** @group getParam */
-  final def getFeaturesCol: String = getOrDefault(featuresCol)
+  final def getFeaturesCol: String = $(featuresCol)
 }
 
 /**
@@ -86,7 +85,7 @@ private[ml] trait HasLabelCol extends Params {
   setDefault(labelCol, "label")
 
   /** @group getParam */
-  final def getLabelCol: String = getOrDefault(labelCol)
+  final def getLabelCol: String = $(labelCol)
 }
 
 /**
@@ -103,7 +102,7 @@ private[ml] trait HasPredictionCol extends Params {
   setDefault(predictionCol, "prediction")
 
   /** @group getParam */
-  final def getPredictionCol: String = getOrDefault(predictionCol)
+  final def getPredictionCol: String = $(predictionCol)
 }
 
 /**
@@ -120,7 +119,7 @@ private[ml] trait HasRawPredictionCol extends Params {
   setDefault(rawPredictionCol, "rawPrediction")
 
   /** @group getParam */
-  final def getRawPredictionCol: String = getOrDefault(rawPredictionCol)
+  final def getRawPredictionCol: String = $(rawPredictionCol)
 }
 
 /**
@@ -137,7 +136,7 @@ private[ml] trait HasProbabilityCol extends Params {
   setDefault(probabilityCol, "probability")
 
   /** @group getParam */
-  final def getProbabilityCol: String = getOrDefault(probabilityCol)
+  final def getProbabilityCol: String = $(probabilityCol)
 }
 
 /**
@@ -152,7 +151,7 @@ private[ml] trait HasThreshold extends Params {
   final val threshold: DoubleParam = new DoubleParam(this, "threshold", "threshold in binary classification prediction, in range [0, 1]", ParamValidators.inRange(0, 1))
 
   /** @group getParam */
-  final def getThreshold: Double = getOrDefault(threshold)
+  final def getThreshold: Double = $(threshold)
 }
 
 /**
@@ -167,7 +166,7 @@ private[ml] trait HasInputCol extends Params {
   final val inputCol: Param[String] = new Param[String](this, "inputCol", "input column name")
 
   /** @group getParam */
-  final def getInputCol: String = getOrDefault(inputCol)
+  final def getInputCol: String = $(inputCol)
 }
 
 /**
@@ -182,7 +181,7 @@ private[ml] trait HasInputCols extends Params {
   final val inputCols: Param[Array[String]] = new Param[Array[String]](this, "inputCols", "input column names")
 
   /** @group getParam */
-  final def getInputCols: Array[String] = getOrDefault(inputCols)
+  final def getInputCols: Array[String] = $(inputCols)
 }
 
 /**
@@ -197,7 +196,7 @@ private[ml] trait HasOutputCol extends Params {
   final val outputCol: Param[String] = new Param[String](this, "outputCol", "output column name")
 
   /** @group getParam */
-  final def getOutputCol: String = getOrDefault(outputCol)
+  final def getOutputCol: String = $(outputCol)
 }
 
 /**
@@ -212,7 +211,7 @@ private[ml] trait HasCheckpointInterval extends Params {
   final val checkpointInterval: IntParam = new IntParam(this, "checkpointInterval", "checkpoint interval (>= 1)", ParamValidators.gtEq(1))
 
   /** @group getParam */
-  final def getCheckpointInterval: Int = getOrDefault(checkpointInterval)
+  final def getCheckpointInterval: Int = $(checkpointInterval)
 }
 
 /**
@@ -229,7 +228,7 @@ private[ml] trait HasFitIntercept extends Params {
   setDefault(fitIntercept, true)
 
   /** @group getParam */
-  final def getFitIntercept: Boolean = getOrDefault(fitIntercept)
+  final def getFitIntercept: Boolean = $(fitIntercept)
 }
 
 /**
@@ -246,7 +245,7 @@ private[ml] trait HasSeed extends Params {
   setDefault(seed, Utils.random.nextLong())
 
   /** @group getParam */
-  final def getSeed: Long = getOrDefault(seed)
+  final def getSeed: Long = $(seed)
 }
 
 /**
@@ -261,7 +260,7 @@ private[ml] trait HasElasticNetParam extends Params {
   final val elasticNetParam: DoubleParam = new DoubleParam(this, "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", ParamValidators.inRange(0, 1))
 
   /** @group getParam */
-  final def getElasticNetParam: Double = getOrDefault(elasticNetParam)
+  final def getElasticNetParam: Double = $(elasticNetParam)
 }
 
 /**
@@ -276,7 +275,7 @@ private[ml] trait HasTol extends Params {
   final val tol: DoubleParam = new DoubleParam(this, "tol", "the convergence tolerance for iterative algorithms")
 
   /** @group getParam */
-  final def getTol: Double = getOrDefault(tol)
+  final def getTol: Double = $(tol)
 }
 
 /**
@@ -291,6 +290,6 @@ private[ml] trait HasStepSize extends Params {
   final val stepSize: DoubleParam = new DoubleParam(this, "stepSize", "Step size to be used for each iteration of optimization.")
 
   /** @group getParam */
-  final def getStepSize: Double = getOrDefault(stepSize)
+  final def getStepSize: Double = $(stepSize)
 }
 // scalastyle:on

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
index f9f2b27..6cf4b40 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
@@ -59,7 +59,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   val rank = new IntParam(this, "rank", "rank of the factorization", ParamValidators.gtEq(1))
 
   /** @group getParam */
-  def getRank: Int = getOrDefault(rank)
+  def getRank: Int = $(rank)
 
   /**
    * Param for number of user blocks (>= 1).
@@ -70,7 +70,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
     ParamValidators.gtEq(1))
 
   /** @group getParam */
-  def getNumUserBlocks: Int = getOrDefault(numUserBlocks)
+  def getNumUserBlocks: Int = $(numUserBlocks)
 
   /**
    * Param for number of item blocks (>= 1).
@@ -81,7 +81,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
       ParamValidators.gtEq(1))
 
   /** @group getParam */
-  def getNumItemBlocks: Int = getOrDefault(numItemBlocks)
+  def getNumItemBlocks: Int = $(numItemBlocks)
 
   /**
    * Param to decide whether to use implicit preference.
@@ -91,7 +91,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   val implicitPrefs = new BooleanParam(this, "implicitPrefs", "whether to use implicit preference")
 
   /** @group getParam */
-  def getImplicitPrefs: Boolean = getOrDefault(implicitPrefs)
+  def getImplicitPrefs: Boolean = $(implicitPrefs)
 
   /**
    * Param for the alpha parameter in the implicit preference formulation (>= 0).
@@ -102,7 +102,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
     ParamValidators.gtEq(0))
 
   /** @group getParam */
-  def getAlpha: Double = getOrDefault(alpha)
+  def getAlpha: Double = $(alpha)
 
   /**
    * Param for the column name for user ids.
@@ -112,7 +112,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   val userCol = new Param[String](this, "userCol", "column name for user ids")
 
   /** @group getParam */
-  def getUserCol: String = getOrDefault(userCol)
+  def getUserCol: String = $(userCol)
 
   /**
    * Param for the column name for item ids.
@@ -122,7 +122,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   val itemCol = new Param[String](this, "itemCol", "column name for item ids")
 
   /** @group getParam */
-  def getItemCol: String = getOrDefault(itemCol)
+  def getItemCol: String = $(itemCol)
 
   /**
    * Param for the column name for ratings.
@@ -132,7 +132,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   val ratingCol = new Param[String](this, "ratingCol", "column name for ratings")
 
   /** @group getParam */
-  def getRatingCol: String = getOrDefault(ratingCol)
+  def getRatingCol: String = $(ratingCol)
 
   /**
    * Param for whether to apply nonnegativity constraints.
@@ -143,7 +143,7 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
     this, "nonnegative", "whether to use nonnegative constraint for least squares")
 
   /** @group getParam */
-  def getNonnegative: Boolean = getOrDefault(nonnegative)
+  def getNonnegative: Boolean = $(nonnegative)
 
   setDefault(rank -> 10, maxIter -> 10, regParam -> 0.1, numUserBlocks -> 10, numItemBlocks -> 10,
     implicitPrefs -> false, alpha -> 1.0, userCol -> "user", itemCol -> "item",
@@ -152,19 +152,17 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
   /**
    * Validates and transforms the input schema.
    * @param schema input schema
-   * @param paramMap extra params
    * @return output schema
    */
-  protected def validateAndTransformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
-    assert(schema(map(userCol)).dataType == IntegerType)
-    assert(schema(map(itemCol)).dataType== IntegerType)
-    val ratingType = schema(map(ratingCol)).dataType
-    assert(ratingType == FloatType || ratingType == DoubleType)
-    val predictionColName = map(predictionCol)
-    assert(!schema.fieldNames.contains(predictionColName),
+  protected def validateAndTransformSchema(schema: StructType): StructType = {
+    require(schema($(userCol)).dataType == IntegerType)
+    require(schema($(itemCol)).dataType== IntegerType)
+    val ratingType = schema($(ratingCol)).dataType
+    require(ratingType == FloatType || ratingType == DoubleType)
+    val predictionColName = $(predictionCol)
+    require(!schema.fieldNames.contains(predictionColName),
       s"Prediction column $predictionColName already exists.")
-    val newFields = schema.fields :+ StructField(map(predictionCol), FloatType, nullable = false)
+    val newFields = schema.fields :+ StructField($(predictionCol), FloatType, nullable = false)
     StructType(newFields)
   }
 }
@@ -174,7 +172,6 @@ private[recommendation] trait ALSParams extends Params with HasMaxIter with HasR
  */
 class ALSModel private[ml] (
     override val parent: ALS,
-    override val fittingParamMap: ParamMap,
     k: Int,
     userFactors: RDD[(Int, Array[Float])],
     itemFactors: RDD[(Int, Array[Float])])
@@ -183,9 +180,8 @@ class ALSModel private[ml] (
   /** @group setParam */
   def setPredictionCol(value: String): this.type = set(predictionCol, value)
 
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
+  override def transform(dataset: DataFrame): DataFrame = {
     import dataset.sqlContext.implicits._
-    val map = extractParamMap(paramMap)
     val users = userFactors.toDF("id", "features")
     val items = itemFactors.toDF("id", "features")
 
@@ -199,13 +195,13 @@ class ALSModel private[ml] (
       }
     }
     dataset
-      .join(users, dataset(map(userCol)) === users("id"), "left")
-      .join(items, dataset(map(itemCol)) === items("id"), "left")
-      .select(dataset("*"), predict(users("features"), items("features")).as(map(predictionCol)))
+      .join(users, dataset($(userCol)) === users("id"), "left")
+      .join(items, dataset($(itemCol)) === items("id"), "left")
+      .select(dataset("*"), predict(users("features"), items("features")).as($(predictionCol)))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }
 
@@ -292,25 +288,22 @@ class ALS extends Estimator[ALSModel] with ALSParams {
     this
   }
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): ALSModel = {
-    val map = extractParamMap(paramMap)
+  override def fit(dataset: DataFrame): ALSModel = {
     val ratings = dataset
-      .select(col(map(userCol)), col(map(itemCol)), col(map(ratingCol)).cast(FloatType))
+      .select(col($(userCol)), col($(itemCol)), col($(ratingCol)).cast(FloatType))
       .map { row =>
         Rating(row.getInt(0), row.getInt(1), row.getFloat(2))
       }
-    val (userFactors, itemFactors) = ALS.train(ratings, rank = map(rank),
-      numUserBlocks = map(numUserBlocks), numItemBlocks = map(numItemBlocks),
-      maxIter = map(maxIter), regParam = map(regParam), implicitPrefs = map(implicitPrefs),
-      alpha = map(alpha), nonnegative = map(nonnegative),
-      checkpointInterval = map(checkpointInterval))
-    val model = new ALSModel(this, map, map(rank), userFactors, itemFactors)
-    Params.inheritValues(map, this, model)
-    model
+    val (userFactors, itemFactors) = ALS.train(ratings, rank = $(rank),
+      numUserBlocks = $(numUserBlocks), numItemBlocks = $(numItemBlocks),
+      maxIter = $(maxIter), regParam = $(regParam), implicitPrefs = $(implicitPrefs),
+      alpha = $(alpha), nonnegative = $(nonnegative),
+      checkpointInterval = $(checkpointInterval))
+    copyValues(new ALSModel(this, $(rank), userFactors, itemFactors))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    validateAndTransformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    validateAndTransformSchema(schema)
   }
 }
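
(For readers tracking the API change: a minimal usage sketch of the new parameterless fit. The `ratings` DataFrame and its column names are illustrative assumptions, not part of the patch.)

    import org.apache.spark.ml.recommendation.ALS

    // Assumes a DataFrame `ratings` with integer "user"/"item" columns
    // and a numeric "rating" column.
    val als = new ALS()
      .setRank(10)
      .setMaxIter(10)
      .setRegParam(0.1)
      .setUserCol("user")
      .setItemCol("item")
      .setRatingCol("rating")

    // fit() no longer takes a ParamMap; the params set above are read via $(...)
    val model = als.fit(ratings)
    val scored = model.setPredictionCol("prediction").transform(ratings)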
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
index 756725a..b07c26f 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
@@ -20,7 +20,7 @@ package org.apache.spark.ml.regression
 import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.impl.estimator.{PredictionModel, Predictor}
 import org.apache.spark.ml.impl.tree._
-import org.apache.spark.ml.param.{Params, ParamMap}
+import org.apache.spark.ml.param.ParamMap
 import org.apache.spark.ml.tree.{DecisionTreeModel, Node}
 import org.apache.spark.ml.util.MetadataUtils
 import org.apache.spark.mllib.linalg.Vector
@@ -31,7 +31,6 @@ import org.apache.spark.mllib.tree.model.{DecisionTreeModel => OldDecisionTreeMo
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.DataFrame
 
-
 /**
  * :: AlphaComponent ::
  *
@@ -63,15 +62,13 @@ final class DecisionTreeRegressor
 
   override def setImpurity(value: String): this.type = super.setImpurity(value)
 
-  override protected def train(
-      dataset: DataFrame,
-      paramMap: ParamMap): DecisionTreeRegressionModel = {
+  override protected def train(dataset: DataFrame): DecisionTreeRegressionModel = {
     val categoricalFeatures: Map[Int, Int] =
-      MetadataUtils.getCategoricalFeatures(dataset.schema(paramMap(featuresCol)))
-    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, paramMap)
+      MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
+    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset)
     val strategy = getOldStrategy(categoricalFeatures)
     val oldModel = OldDecisionTree.train(oldDataset, strategy)
-    DecisionTreeRegressionModel.fromOld(oldModel, this, paramMap, categoricalFeatures)
+    DecisionTreeRegressionModel.fromOld(oldModel, this, categoricalFeatures)
   }
 
   /** (private[ml]) Create a Strategy instance to use with the old API. */
@@ -96,7 +93,6 @@ object DecisionTreeRegressor {
 @AlphaComponent
 final class DecisionTreeRegressionModel private[ml] (
     override val parent: DecisionTreeRegressor,
-    override val fittingParamMap: ParamMap,
     override val rootNode: Node)
   extends PredictionModel[Vector, DecisionTreeRegressionModel]
   with DecisionTreeModel with Serializable {
@@ -108,10 +104,8 @@ final class DecisionTreeRegressionModel private[ml] (
     rootNode.predict(features)
   }
 
-  override protected def copy(): DecisionTreeRegressionModel = {
-    val m = new DecisionTreeRegressionModel(parent, fittingParamMap, rootNode)
-    Params.inheritValues(this.extractParamMap(), this, m)
-    m
+  override def copy(extra: ParamMap): DecisionTreeRegressionModel = {
+    copyValues(new DecisionTreeRegressionModel(parent, rootNode), extra)
   }
 
   override def toString: String = {
@@ -130,12 +124,11 @@ private[ml] object DecisionTreeRegressionModel {
   def fromOld(
       oldModel: OldDecisionTreeModel,
       parent: DecisionTreeRegressor,
-      fittingParamMap: ParamMap,
       categoricalFeatures: Map[Int, Int]): DecisionTreeRegressionModel = {
     require(oldModel.algo == OldAlgo.Regression,
       s"Cannot convert non-regression DecisionTreeModel (old API) to" +
         s" DecisionTreeRegressionModel (new API).  Algo is: ${oldModel.algo}")
     val rootNode = Node.fromOld(oldModel.topNode, categoricalFeatures)
-    new DecisionTreeRegressionModel(parent, fittingParamMap, rootNode)
+    new DecisionTreeRegressionModel(parent, rootNode)
   }
 }
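
(A short sketch of the new copy(extra) contract. The `train` DataFrame and the "treePrediction" column name are illustrative assumptions only.)

    import org.apache.spark.ml.param.ParamMap
    import org.apache.spark.ml.regression.DecisionTreeRegressor

    // Assumes a DataFrame `train` with "label" and "features" columns.
    val dt = new DecisionTreeRegressor().setMaxDepth(5)
    val model = dt.fit(train)

    // copy(extra) returns a clone carrying the current param values with the
    // extras layered on top, replacing the old protected copy() +
    // Params.inheritValues pattern.
    val renamed = model.copy(ParamMap(model.predictionCol -> "treePrediction"))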

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
index 76c9837..bc79695 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
@@ -23,20 +23,18 @@ import org.apache.spark.Logging
 import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.impl.estimator.{PredictionModel, Predictor}
 import org.apache.spark.ml.impl.tree._
-import org.apache.spark.ml.param.{Params, ParamMap, Param}
+import org.apache.spark.ml.param.{Param, ParamMap}
 import org.apache.spark.ml.tree.{DecisionTreeModel, TreeEnsembleModel}
 import org.apache.spark.ml.util.MetadataUtils
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.mllib.regression.LabeledPoint
 import org.apache.spark.mllib.tree.{GradientBoostedTrees => OldGBT}
 import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
-import org.apache.spark.mllib.tree.loss.{AbsoluteError => OldAbsoluteError, Loss => OldLoss,
-  SquaredError => OldSquaredError}
+import org.apache.spark.mllib.tree.loss.{AbsoluteError => OldAbsoluteError, Loss => OldLoss, SquaredError => OldSquaredError}
 import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.DataFrame
 
-
 /**
  * :: AlphaComponent ::
  *
@@ -111,7 +109,7 @@ final class GBTRegressor
   def setLossType(value: String): this.type = set(lossType, value)
 
   /** @group getParam */
-  def getLossType: String = getOrDefault(lossType).toLowerCase
+  def getLossType: String = $(lossType).toLowerCase
 
   /** (private[ml]) Convert new loss to old loss. */
   override private[ml] def getOldLossType: OldLoss = {
@@ -124,16 +122,14 @@ final class GBTRegressor
     }
   }
 
-  override protected def train(
-      dataset: DataFrame,
-      paramMap: ParamMap): GBTRegressionModel = {
+  override protected def train(dataset: DataFrame): GBTRegressionModel = {
     val categoricalFeatures: Map[Int, Int] =
-      MetadataUtils.getCategoricalFeatures(dataset.schema(paramMap(featuresCol)))
-    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, paramMap)
+      MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
+    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset)
     val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Regression)
     val oldGBT = new OldGBT(boostingStrategy)
     val oldModel = oldGBT.run(oldDataset)
-    GBTRegressionModel.fromOld(oldModel, this, paramMap, categoricalFeatures)
+    GBTRegressionModel.fromOld(oldModel, this, categoricalFeatures)
   }
 }
 
@@ -155,7 +151,6 @@ object GBTRegressor {
 @AlphaComponent
 final class GBTRegressionModel(
     override val parent: GBTRegressor,
-    override val fittingParamMap: ParamMap,
     private val _trees: Array[DecisionTreeRegressionModel],
     private val _treeWeights: Array[Double])
   extends PredictionModel[Vector, GBTRegressionModel]
@@ -178,10 +173,8 @@ final class GBTRegressionModel(
     if (prediction > 0.0) 1.0 else 0.0
   }
 
-  override protected def copy(): GBTRegressionModel = {
-    val m = new GBTRegressionModel(parent, fittingParamMap, _trees, _treeWeights)
-    Params.inheritValues(this.extractParamMap(), this, m)
-    m
+  override def copy(extra: ParamMap): GBTRegressionModel = {
+    copyValues(new GBTRegressionModel(parent, _trees, _treeWeights), extra)
   }
 
   override def toString: String = {
@@ -200,14 +193,13 @@ private[ml] object GBTRegressionModel {
   def fromOld(
       oldModel: OldGBTModel,
       parent: GBTRegressor,
-      fittingParamMap: ParamMap,
       categoricalFeatures: Map[Int, Int]): GBTRegressionModel = {
     require(oldModel.algo == OldAlgo.Regression, "Cannot convert GradientBoostedTreesModel" +
       s" with algo=${oldModel.algo} (old API) to GBTRegressionModel (new API).")
     val newTrees = oldModel.trees.map { tree =>
       // parent, fittingParamMap for each tree is null since there are no good ways to set these.
-      DecisionTreeRegressionModel.fromOld(tree, null, null, categoricalFeatures)
+      DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
     }
-    new GBTRegressionModel(parent, fittingParamMap, newTrees, oldModel.treeWeights)
+    new GBTRegressionModel(parent, newTrees, oldModel.treeWeights)
   }
 }
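
(A small sketch of the getter change above; the `train` DataFrame and parameter values are illustrative assumptions.)

    import org.apache.spark.ml.regression.GBTRegressor

    // Assumes a DataFrame `train` with "label" and "features" columns.
    val gbt = new GBTRegressor()
      .setMaxIter(20)
      .setLossType("squared")

    // getLossType now reads the value via $(lossType) and lower-cases it.
    require(gbt.getLossType == "squared")

    val model = gbt.fit(train)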

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
index 0b81c48..66c475f 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
@@ -19,22 +19,22 @@ package org.apache.spark.ml.regression
 
 import scala.collection.mutable
 
-import breeze.linalg.{norm => brzNorm, DenseVector => BDV}
-import breeze.optimize.{LBFGS => BreezeLBFGS, OWLQN => BreezeOWLQN}
-import breeze.optimize.{CachedDiffFunction, DiffFunction}
+import breeze.linalg.{DenseVector => BDV, norm => brzNorm}
+import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS,
+  OWLQN => BreezeOWLQN}
 
+import org.apache.spark.Logging
 import org.apache.spark.annotation.AlphaComponent
-import org.apache.spark.ml.param.{Params, ParamMap}
-import org.apache.spark.ml.param.shared.{HasTol, HasElasticNetParam, HasMaxIter, HasRegParam}
-import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
+import org.apache.spark.ml.param.ParamMap
+import org.apache.spark.ml.param.shared.{HasElasticNetParam, HasMaxIter, HasRegParam, HasTol}
 import org.apache.spark.mllib.linalg.{Vector, Vectors}
 import org.apache.spark.mllib.linalg.BLAS._
 import org.apache.spark.mllib.regression.LabeledPoint
+import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.storage.StorageLevel
 import org.apache.spark.util.StatCounter
-import org.apache.spark.Logging
 
 /**
  * Params for linear regression.
@@ -96,9 +96,9 @@ class LinearRegression extends Regressor[Vector, LinearRegression, LinearRegress
   def setTol(value: Double): this.type = set(tol, value)
   setDefault(tol -> 1E-6)
 
-  override protected def train(dataset: DataFrame, paramMap: ParamMap): LinearRegressionModel = {
+  override protected def train(dataset: DataFrame): LinearRegressionModel = {
     // Extract columns from data.  If dataset is persisted, do not persist instances.
-    val instances = extractLabeledPoints(dataset, paramMap).map {
+    val instances = extractLabeledPoints(dataset).map {
       case LabeledPoint(label: Double, features: Vector) => (label, features)
     }
     val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE
@@ -125,7 +125,7 @@ class LinearRegression extends Regressor[Vector, LinearRegression, LinearRegress
       logWarning(s"The standard deviation of the label is zero, so the weights will be zeros " +
         s"and the intercept will be the mean of the label; as a result, training is not needed.")
       if (handlePersistence) instances.unpersist()
-      return new LinearRegressionModel(this, paramMap, Vectors.sparse(numFeatures, Seq()), yMean)
+      return new LinearRegressionModel(this, Vectors.sparse(numFeatures, Seq()), yMean)
     }
 
     val featuresMean = summarizer.mean.toArray
@@ -133,17 +133,17 @@ class LinearRegression extends Regressor[Vector, LinearRegression, LinearRegress
 
     // Since we implicitly do the feature scaling when we compute the cost function
     // to improve the convergence, the effective regParam will be changed.
-    val effectiveRegParam = paramMap(regParam) / yStd
-    val effectiveL1RegParam = paramMap(elasticNetParam) * effectiveRegParam
-    val effectiveL2RegParam = (1.0 - paramMap(elasticNetParam)) * effectiveRegParam
+    val effectiveRegParam = $(regParam) / yStd
+    val effectiveL1RegParam = $(elasticNetParam) * effectiveRegParam
+    val effectiveL2RegParam = (1.0 - $(elasticNetParam)) * effectiveRegParam
 
     val costFun = new LeastSquaresCostFun(instances, yStd, yMean,
       featuresStd, featuresMean, effectiveL2RegParam)
 
-    val optimizer = if (paramMap(elasticNetParam) == 0.0 || effectiveRegParam == 0.0) {
-      new BreezeLBFGS[BDV[Double]](paramMap(maxIter), 10, paramMap(tol))
+    val optimizer = if ($(elasticNetParam) == 0.0 || effectiveRegParam == 0.0) {
+      new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol))
     } else {
-      new BreezeOWLQN[Int, BDV[Double]](paramMap(maxIter), 10, effectiveL1RegParam, paramMap(tol))
+      new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, effectiveL1RegParam, $(tol))
     }
 
     val initialWeights = Vectors.zeros(numFeatures)
@@ -178,7 +178,7 @@ class LinearRegression extends Regressor[Vector, LinearRegression, LinearRegress
     if (handlePersistence) instances.unpersist()
 
     // TODO: Converts to sparse format based on the storage, but may base on the scoring speed.
-    new LinearRegressionModel(this, paramMap, weights.compressed, intercept)
+    new LinearRegressionModel(this, weights.compressed, intercept)
   }
 }
 
@@ -190,7 +190,6 @@ class LinearRegression extends Regressor[Vector, LinearRegression, LinearRegress
 @AlphaComponent
 class LinearRegressionModel private[ml] (
     override val parent: LinearRegression,
-    override val fittingParamMap: ParamMap,
     val weights: Vector,
     val intercept: Double)
   extends RegressionModel[Vector, LinearRegressionModel]
@@ -200,10 +199,8 @@ class LinearRegressionModel private[ml] (
     dot(features, weights) + intercept
   }
 
-  override protected def copy(): LinearRegressionModel = {
-    val m = new LinearRegressionModel(parent, fittingParamMap, weights, intercept)
-    Params.inheritValues(extractParamMap(), this, m)
-    m
+  override def copy(extra: ParamMap): LinearRegressionModel = {
+    copyValues(new LinearRegressionModel(parent, weights, intercept), extra)
   }
 }
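
(Worked arithmetic for the regularization rescaling shown in train() above; the numeric values are made up for illustration.)

    // Because the label and features are standardized internally, the
    // regularization is rescaled before optimization.
    val regParam = 0.3
    val elasticNetParam = 0.4   // 0.0 = pure L2, 1.0 = pure L1
    val yStd = 2.0              // standard deviation of the label

    val effectiveRegParam   = regParam / yStd                              // 0.15
    val effectiveL1RegParam = elasticNetParam * effectiveRegParam          // 0.06
    val effectiveL2RegParam = (1.0 - elasticNetParam) * effectiveRegParam  // 0.09
    // elasticNetParam == 0.0 (or effectiveRegParam == 0.0) selects plain
    // L-BFGS; otherwise OWL-QN handles the L1 term.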
 

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
index 2171ef3..0468a1b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
@@ -20,18 +20,17 @@ package org.apache.spark.ml.regression
 import org.apache.spark.annotation.AlphaComponent
 import org.apache.spark.ml.impl.estimator.{PredictionModel, Predictor}
 import org.apache.spark.ml.impl.tree.{RandomForestParams, TreeRegressorParams}
-import org.apache.spark.ml.param.{Params, ParamMap}
+import org.apache.spark.ml.param.ParamMap
 import org.apache.spark.ml.tree.{DecisionTreeModel, TreeEnsembleModel}
 import org.apache.spark.ml.util.MetadataUtils
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.mllib.regression.LabeledPoint
 import org.apache.spark.mllib.tree.{RandomForest => OldRandomForest}
-import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy}
+import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
 import org.apache.spark.mllib.tree.model.{RandomForestModel => OldRandomForestModel}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.DataFrame
 
-
 /**
  * :: AlphaComponent ::
  *
@@ -77,17 +76,15 @@ final class RandomForestRegressor
   override def setFeatureSubsetStrategy(value: String): this.type =
     super.setFeatureSubsetStrategy(value)
 
-  override protected def train(
-      dataset: DataFrame,
-      paramMap: ParamMap): RandomForestRegressionModel = {
+  override protected def train(dataset: DataFrame): RandomForestRegressionModel = {
     val categoricalFeatures: Map[Int, Int] =
-      MetadataUtils.getCategoricalFeatures(dataset.schema(paramMap(featuresCol)))
-    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, paramMap)
+      MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
+    val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset)
     val strategy =
       super.getOldStrategy(categoricalFeatures, numClasses = 0, OldAlgo.Regression, getOldImpurity)
     val oldModel = OldRandomForest.trainRegressor(
       oldDataset, strategy, getNumTrees, getFeatureSubsetStrategy, getSeed.toInt)
-    RandomForestRegressionModel.fromOld(oldModel, this, paramMap, categoricalFeatures)
+    RandomForestRegressionModel.fromOld(oldModel, this, categoricalFeatures)
   }
 }
 
@@ -110,7 +107,6 @@ object RandomForestRegressor {
 @AlphaComponent
 final class RandomForestRegressionModel private[ml] (
     override val parent: RandomForestRegressor,
-    override val fittingParamMap: ParamMap,
     private val _trees: Array[DecisionTreeRegressionModel])
   extends PredictionModel[Vector, RandomForestRegressionModel]
   with TreeEnsembleModel with Serializable {
@@ -132,10 +128,8 @@ final class RandomForestRegressionModel private[ml] (
     _trees.map(_.rootNode.predict(features)).sum / numTrees
   }
 
-  override protected def copy(): RandomForestRegressionModel = {
-    val m = new RandomForestRegressionModel(parent, fittingParamMap, _trees)
-    Params.inheritValues(this.extractParamMap(), this, m)
-    m
+  override def copy(extra: ParamMap): RandomForestRegressionModel = {
+    copyValues(new RandomForestRegressionModel(parent, _trees), extra)
   }
 
   override def toString: String = {
@@ -154,14 +148,13 @@ private[ml] object RandomForestRegressionModel {
   def fromOld(
       oldModel: OldRandomForestModel,
       parent: RandomForestRegressor,
-      fittingParamMap: ParamMap,
       categoricalFeatures: Map[Int, Int]): RandomForestRegressionModel = {
     require(oldModel.algo == OldAlgo.Regression, "Cannot convert RandomForestModel" +
       s" with algo=${oldModel.algo} (old API) to RandomForestRegressionModel (new API).")
     val newTrees = oldModel.trees.map { tree =>
       // parent, fittingParamMap for each tree is null since there are no good ways to set these.
-      DecisionTreeRegressionModel.fromOld(tree, null, null, categoricalFeatures)
+      DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
     }
-    new RandomForestRegressionModel(parent, fittingParamMap, newTrees)
+    new RandomForestRegressionModel(parent, newTrees)
   }
 }
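
(A toy sketch of the model's prediction rule, mirroring _trees.map(_.rootNode.predict(features)).sum / numTrees above; the per-tree values are hypothetical.)

    // A forest prediction is the unweighted mean of the trees' predictions.
    val treePredictions = Seq(2.0, 2.5, 1.5, 2.0)                        // hypothetical per-tree outputs
    val forestPrediction = treePredictions.sum / treePredictions.size    // 2.0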

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala
index d679085..c6b3327 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.ml.regression
 
-import org.apache.spark.annotation.{DeveloperApi, AlphaComponent}
+import org.apache.spark.annotation.{AlphaComponent, DeveloperApi}
 import org.apache.spark.ml.impl.estimator.{PredictionModel, Predictor, PredictorParams}
 
 /**

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
index d1ad089..cee2aa6 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
@@ -39,7 +39,7 @@ private[ml] trait CrossValidatorParams extends Params {
   val estimator: Param[Estimator[_]] = new Param(this, "estimator", "estimator for selection")
 
   /** @group getParam */
-  def getEstimator: Estimator[_] = getOrDefault(estimator)
+  def getEstimator: Estimator[_] = $(estimator)
 
   /**
    * param for estimator param maps
@@ -49,7 +49,7 @@ private[ml] trait CrossValidatorParams extends Params {
     new Param(this, "estimatorParamMaps", "param maps for the estimator")
 
   /** @group getParam */
-  def getEstimatorParamMaps: Array[ParamMap] = getOrDefault(estimatorParamMaps)
+  def getEstimatorParamMaps: Array[ParamMap] = $(estimatorParamMaps)
 
   /**
    * param for the evaluator for selection
@@ -58,7 +58,7 @@ private[ml] trait CrossValidatorParams extends Params {
   val evaluator: Param[Evaluator] = new Param(this, "evaluator", "evaluator for selection")
 
   /** @group getParam */
-  def getEvaluator: Evaluator = getOrDefault(evaluator)
+  def getEvaluator: Evaluator = $(evaluator)
 
   /**
    * Param for number of folds for cross validation.  Must be >= 2.
@@ -69,7 +69,7 @@ private[ml] trait CrossValidatorParams extends Params {
     "number of folds for cross validation (>= 2)", ParamValidators.gtEq(2))
 
   /** @group getParam */
-  def getNumFolds: Int = getOrDefault(numFolds)
+  def getNumFolds: Int = $(numFolds)
 
   setDefault(numFolds -> 3)
 }
@@ -95,23 +95,22 @@ class CrossValidator extends Estimator[CrossValidatorModel] with CrossValidatorP
   /** @group setParam */
   def setNumFolds(value: Int): this.type = set(numFolds, value)
 
-  override def validate(paramMap: ParamMap): Unit = {
+  override def validateParams(paramMap: ParamMap): Unit = {
     getEstimatorParamMaps.foreach { eMap =>
-      getEstimator.validate(eMap ++ paramMap)
+      getEstimator.validateParams(eMap ++ paramMap)
     }
   }
 
-  override def fit(dataset: DataFrame, paramMap: ParamMap): CrossValidatorModel = {
-    val map = extractParamMap(paramMap)
+  override def fit(dataset: DataFrame): CrossValidatorModel = {
     val schema = dataset.schema
-    transformSchema(dataset.schema, paramMap, logging = true)
+    transformSchema(dataset.schema, logging = true)
     val sqlCtx = dataset.sqlContext
-    val est = map(estimator)
-    val eval = map(evaluator)
-    val epm = map(estimatorParamMaps)
+    val est = $(estimator)
+    val eval = $(evaluator)
+    val epm = $(estimatorParamMaps)
     val numModels = epm.length
     val metrics = new Array[Double](epm.length)
-    val splits = MLUtils.kFold(dataset.rdd, map(numFolds), 0)
+    val splits = MLUtils.kFold(dataset.rdd, $(numFolds), 0)
     splits.zipWithIndex.foreach { case ((training, validation), splitIndex) =>
       val trainingDataset = sqlCtx.createDataFrame(training, schema).cache()
       val validationDataset = sqlCtx.createDataFrame(validation, schema).cache()
@@ -121,27 +120,24 @@ class CrossValidator extends Estimator[CrossValidatorModel] with CrossValidatorP
       trainingDataset.unpersist()
       var i = 0
       while (i < numModels) {
-        val metric = eval.evaluate(models(i).transform(validationDataset, epm(i)), map)
+        val metric = eval.evaluate(models(i).transform(validationDataset, epm(i)))
         logDebug(s"Got metric $metric for model trained with ${epm(i)}.")
         metrics(i) += metric
         i += 1
       }
       validationDataset.unpersist()
     }
-    f2jBLAS.dscal(numModels, 1.0 / map(numFolds), metrics, 1)
+    f2jBLAS.dscal(numModels, 1.0 / $(numFolds), metrics, 1)
     logInfo(s"Average cross-validation metrics: ${metrics.toSeq}")
     val (bestMetric, bestIndex) = metrics.zipWithIndex.maxBy(_._1)
     logInfo(s"Best set of parameters:\n${epm(bestIndex)}")
     logInfo(s"Best cross-validation metric: $bestMetric.")
     val bestModel = est.fit(dataset, epm(bestIndex)).asInstanceOf[Model[_]]
-    val cvModel = new CrossValidatorModel(this, map, bestModel)
-    Params.inheritValues(map, this, cvModel)
-    cvModel
+    copyValues(new CrossValidatorModel(this, bestModel))
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    val map = extractParamMap(paramMap)
-    map(estimator).transformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    $(estimator).transformSchema(schema)
   }
 }
 
@@ -152,19 +148,18 @@ class CrossValidator extends Estimator[CrossValidatorModel] with CrossValidatorP
 @AlphaComponent
 class CrossValidatorModel private[ml] (
     override val parent: CrossValidator,
-    override val fittingParamMap: ParamMap,
     val bestModel: Model[_])
   extends Model[CrossValidatorModel] with CrossValidatorParams {
 
-  override def validate(paramMap: ParamMap): Unit = {
-    bestModel.validate(paramMap)
+  override def validateParams(paramMap: ParamMap): Unit = {
+    bestModel.validateParams(paramMap)
   }
 
-  override def transform(dataset: DataFrame, paramMap: ParamMap): DataFrame = {
-    bestModel.transform(dataset, paramMap)
+  override def transform(dataset: DataFrame): DataFrame = {
+    bestModel.transform(dataset)
   }
 
-  override def transformSchema(schema: StructType, paramMap: ParamMap): StructType = {
-    bestModel.transformSchema(schema, paramMap)
+  override def transformSchema(schema: StructType): StructType = {
+    bestModel.transformSchema(schema)
   }
 }
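
(A minimal usage sketch of CrossValidator with the parameterless fit. The `data` DataFrame, the estimator choice, and the grid values are illustrative assumptions only.)

    import org.apache.spark.ml.classification.LogisticRegression
    import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
    import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}

    // Assumes a DataFrame `data` with "label" and "features" columns.
    val lr = new LogisticRegression()
    val grid = new ParamGridBuilder()
      .addGrid(lr.regParam, Array(0.01, 0.1))
      .addGrid(lr.maxIter, Array(10, 50))
      .build()

    val cv = new CrossValidator()
      .setEstimator(lr)
      .setEstimatorParamMaps(grid)
      .setEvaluator(new BinaryClassificationEvaluator())
      .setNumFolds(3)

    // fit() no longer takes a ParamMap; estimator, evaluator, paramMaps and
    // numFolds are read via $(...)
    val cvModel = cv.fit(data)
    val best = cvModel.bestModel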

http://git-wip-us.apache.org/repos/asf/spark/blob/e0833c59/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
index 3f8e59d..7e7189a 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
@@ -84,9 +84,10 @@ public class JavaLogisticRegressionSuite implements Serializable {
       .setThreshold(0.6)
       .setProbabilityCol("myProbability");
     LogisticRegressionModel model = lr.fit(dataset);
-    assert(model.fittingParamMap().apply(lr.maxIter()).equals(10));
-    assert(model.fittingParamMap().apply(lr.regParam()).equals(1.0));
-    assert(model.fittingParamMap().apply(lr.threshold()).equals(0.6));
+    LogisticRegression parent = model.parent();
+    assert(parent.getMaxIter() == 10);
+    assert(parent.getRegParam() == 1.0);
+    assert(parent.getThreshold() == 0.6);
     assert(model.getThreshold() == 0.6);
 
     // Modify model params, and check that the params worked.
@@ -109,9 +110,10 @@ public class JavaLogisticRegressionSuite implements Serializable {
     // Call fit() with new params, and check as many params as we can.
     LogisticRegressionModel model2 = lr.fit(dataset, lr.maxIter().w(5), lr.regParam().w(0.1),
         lr.threshold().w(0.4), lr.probabilityCol().w("theProb"));
-    assert(model2.fittingParamMap().apply(lr.maxIter()).equals(5));
-    assert(model2.fittingParamMap().apply(lr.regParam()).equals(0.1));
-    assert(model2.fittingParamMap().apply(lr.threshold()).equals(0.4));
+    LogisticRegression parent2 = model2.parent();
+    assert(parent2.getMaxIter() == 5);
+    assert(parent2.getRegParam() == 0.1);
+    assert(parent2.getThreshold() == 0.4);
     assert(model2.getThreshold() == 0.4);
     assert(model2.getProbabilityCol().equals("theProb"));
   }
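
(The same check, sketched in Scala: fit-time params are now read off the parent estimator rather than a fittingParamMap. The `dataset` DataFrame is an illustrative assumption.)

    import org.apache.spark.ml.classification.LogisticRegression

    // Assumes a DataFrame `dataset` with "label" and "features" columns.
    val lr = new LogisticRegression().setMaxIter(10).setRegParam(1.0).setThreshold(0.6)
    val model = lr.fit(dataset)
    assert(model.parent.getMaxIter == 10)
    assert(model.parent.getRegParam == 1.0)
    assert(model.parent.getThreshold == 0.6)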

