Posted to commits@ignite.apache.org by ch...@apache.org on 2019/02/05 12:04:07 UTC

[ignite] branch master updated: IGNITE-11041: [ML] Add parser for Spark Gradient-boosted tree regressor

This is an automated email from the ASF dual-hosted git repository.

chief pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ignite.git


The following commit(s) were added to refs/heads/master by this push:
     new 963e2c2  IGNITE-11041: [ML] Add parser for Spark Gradient-boosted tree regressor
963e2c2 is described below

commit 963e2c2abaabbd9a098cd74c967f9f59d58de2a8
Author: zaleslaw <za...@gmail.com>
AuthorDate: Tue Feb 5 15:03:43 2019 +0300

    IGNITE-11041: [ML] Add parser for Spark Gradient-boosted tree regressor
    
    This closes #6013
---
 .../modelparser/GBTRegressionFromSparkExample.java |  95 +++++++++++++++++++++
 .../spark/serialized/gbtreg/data/._SUCCESS.crc     | Bin 0 -> 8 bytes
 ...-4944-b933-7897869a29d3-c000.snappy.parquet.crc | Bin 0 -> 688 bytes
 .../models/spark/serialized/gbtreg/data/_SUCCESS   |   0
 ...888b-4944-b933-7897869a29d3-c000.snappy.parquet | Bin 0 -> 86992 bytes
 .../spark/serialized/gbtreg/metadata/._SUCCESS.crc | Bin 0 -> 8 bytes
 .../serialized/gbtreg/metadata/.part-00000.crc     | Bin 0 -> 12 bytes
 .../spark/serialized/gbtreg/metadata/_SUCCESS      |   0
 .../spark/serialized/gbtreg/metadata/part-00000    |   1 +
 .../serialized/gbtreg/treesMetadata/._SUCCESS.crc  | Bin 0 -> 8 bytes
 ...-48b3-bad7-07c343405928-c000.snappy.parquet.crc | Bin 0 -> 80 bytes
 .../spark/serialized/gbtreg/treesMetadata/_SUCCESS |   0
 ...1326-48b3-bad7-07c343405928-c000.snappy.parquet | Bin 0 -> 8925 bytes
 .../ml/sparkmodelparser/SparkModelParser.java      |  41 +++++++--
 .../ml/sparkmodelparser/SupportedSparkModels.java  |   3 +
 15 files changed, 134 insertions(+), 6 deletions(-)

diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTRegressionFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTRegressionFromSparkExample.java
new file mode 100644
index 0000000..4c2837c
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTRegressionFromSparkExample.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.inference.spark.modelparser;
+
+import java.io.FileNotFoundException;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.examples.ml.tutorial.TitanicUtils;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.sparkmodelparser.SparkModelParser;
+import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels;
+
+/**
+ * Run the GBT Regression model loaded from a snappy.parquet file.
+ * The snappy.parquet file was generated by the Spark MLlib model.write.overwrite().save(..) operator.
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class GBTRegressionFromSparkExample {
+    /** Path to Spark GBT Regression model. */
+    public static final String SPARK_MDL_PATH = "examples/src/main/resources/models/spark/serialized/gbtreg/data" +
+        "/part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet";
+
+    /** Spark model metadata path. */
+    private static final String SPARK_MDL_METADATA_PATH = "examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata" +
+        "/part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet";
+
+    /** Run example. */
+    public static void main(String[] args) throws FileNotFoundException {
+        System.out.println();
+        System.out.println(">>> GBT Regression model loaded from Spark through serialization over partitioned dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
+            IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> {
+                double[] data = new double[] {(double)v[0], (double)v[1], (double)v[5], (double)v[6]};
+                data[0] = Double.isNaN(data[0]) ? 0 : data[0];
+                data[1] = Double.isNaN(data[1]) ? 0 : data[1];
+                data[2] = Double.isNaN(data[2]) ? 0 : data[2];
+                data[3] = Double.isNaN(data[3]) ? 0 : data[3];
+                return VectorUtils.of(data);
+            };
+
+            IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double)v[4];
+
+            ModelsComposition mdl = (ModelsComposition)SparkModelParser.parseWithMetadata(
+                SPARK_MDL_PATH, SPARK_MDL_METADATA_PATH,
+                SupportedSparkModels.GRADIENT_BOOSTED_TREES_REGRESSION
+            );
+
+            System.out.println(">>> GBT Regression model: " + mdl);
+
+            System.out.println(">>> ---------------------------------");
+            System.out.println(">>> | Prediction\t| Ground Truth\t|");
+            System.out.println(">>> ---------------------------------");
+
+            try (QueryCursor<Cache.Entry<Integer, Object[]>> observations = dataCache.query(new ScanQuery<>())) {
+                for (Cache.Entry<Integer, Object[]> observation : observations) {
+                    Vector inputs = featureExtractor.apply(observation.getKey(), observation.getValue());
+                    double groundTruth = lbExtractor.apply(observation.getKey(), observation.getValue());
+                    double prediction = mdl.predict(inputs);
+
+                    System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth);
+                }
+            }
+
+            System.out.println(">>> ---------------------------------");
+        }
+    }
+}
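
For context, the data/, metadata/ and treesMetadata/ directories added under resources/models/spark/serialized/gbtreg below are the standard on-disk layout produced by Spark MLlib's model writer. The following is a minimal sketch, not part of this commit, of how such a directory could be produced with the Spark 2.2 Java API; the input file name, the column names and the VectorAssembler step are assumptions made for illustration, while the GBTRegressor hyper-parameters mirror the paramMap stored in metadata/part-00000 further below (labelCol=age, maxDepth=8, maxIter=100, numFeatures=4).

import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.regression.GBTRegressionModel;
import org.apache.spark.ml.regression.GBTRegressor;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class ExportGBTRegressionModelSketch {
    public static void main(String[] args) throws Exception {
        SparkSession spark = SparkSession.builder().appName("gbtreg-export").getOrCreate();

        // Hypothetical input: a CSV with the Titanic passenger data used by the Ignite example.
        Dataset<Row> passengers = spark.read()
            .option("header", "true")
            .option("inferSchema", "true")
            .csv("titanic.csv");

        // Assemble four numeric columns into the "features" vector column expected by GBTRegressor.
        // The column names here are placeholders; the real data set may use different ones.
        Dataset<Row> training = new VectorAssembler()
            .setInputCols(new String[] {"pclass", "sibsp", "parch", "fare"})
            .setOutputCol("features")
            .transform(passengers.na().fill(0.0));

        // Hyper-parameters chosen to match the saved paramMap.
        GBTRegressionModel mdl = new GBTRegressor()
            .setLabelCol("age")
            .setFeaturesCol("features")
            .setMaxDepth(8)
            .setMaxIter(100)
            .fit(training);

        // Writes the data/, metadata/ and treesMetadata/ folders that SparkModelParser reads.
        mdl.write().overwrite().save("examples/src/main/resources/models/spark/serialized/gbtreg");

        spark.stop();
    }
}
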
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/data/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/data/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/data/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/data/.part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/data/.part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet.crc
new file mode 100644
index 0000000..255e0c7
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/data/.part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/data/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbtreg/data/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/data/part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/gbtreg/data/part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet
new file mode 100644
index 0000000..706fa72
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/data/part-00000-db4215b8-888b-4944-b933-7897869a29d3-c000.snappy.parquet differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/.part-00000.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/.part-00000.crc
new file mode 100644
index 0000000..b4fd7f8
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/.part-00000.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/part-00000 b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/part-00000
new file mode 100644
index 0000000..86432e8
--- /dev/null
+++ b/examples/src/main/resources/models/spark/serialized/gbtreg/metadata/part-00000
@@ -0,0 +1 @@
+{"class":"org.apache.spark.ml.regression.GBTRegressionModel","timestamp":1548339654834,"sparkVersion":"2.2.0","uid":"gbtr_31c6fb2bba5f","paramMap":{"labelCol":"age","minInstancesPerNode":1,"maxDepth":8,"predictionCol":"prediction","cacheNodeIds":false,"maxIter":100,"stepSize":0.1,"featuresCol":"features","subsamplingRate":1.0,"maxMemoryInMB":256,"lossType":"squared","minInfoGain":0.0,"checkpointInterval":10,"seed":-131597770,"impurity":"variance","maxBins":32},"numFeatures":4,"numTrees":100}
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/.part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/.part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet.crc
new file mode 100644
index 0000000..e5efb27
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/.part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet
new file mode 100644
index 0000000..0f1f46c
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbtreg/treesMetadata/part-00000-999806a9-1326-48b3-bad7-07c343405928-c000.snappy.parquet differ
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
index cc54848..db10814 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
@@ -58,6 +58,7 @@ import org.apache.parquet.io.RecordReader;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Type;
 import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
 
 /** Parser of Spark models. */
 public class SparkModelParser {
@@ -187,22 +188,50 @@ public class SparkModelParser {
         switch (parsedSparkMdl) {
             case GRADIENT_BOOSTED_TREES:
                 return loadGBTClassifierModel(ignitePathToMdl, ignitePathToMdlMetaData);
+            case GRADIENT_BOOSTED_TREES_REGRESSION:
+                return loadGBTRegressionModel(ignitePathToMdl, ignitePathToMdlMetaData);
             default:
                 throw new UnsupportedSparkModelException(ignitePathToMdl);
         }
     }
 
     /**
-     * Load GBT model.
+     * Load GDB Regression model.
      *
      * @param pathToMdl Path to model.
-     * @param ignitePathToMdlMetaData Ignite path to model meta data.
+     * @param pathToMdlMetaData Path to model meta data.
      */
-    private static Model loadGBTClassifierModel(String pathToMdl, String ignitePathToMdlMetaData) {
+    private static Model loadGBTRegressionModel(String pathToMdl, String pathToMdlMetaData) {
+        IgniteFunction<Double, Double> lbMapper = lb -> lb;
+
+        return parseAndBuildGDBModel(pathToMdl, pathToMdlMetaData, lbMapper);
+    }
+
+    /**
+     * Load GDB Classification model.
+     *
+     * @param pathToMdl Path to model.
+     * @param pathToMdlMetaData Path to model meta data.
+     */
+    private static Model loadGBTClassifierModel(String pathToMdl, String pathToMdlMetaData) {
+        IgniteFunction<Double, Double> lbMapper = lb -> lb > 0.5 ? 1.0 : 0.0;
+
+        return parseAndBuildGDBModel(pathToMdl, pathToMdlMetaData, lbMapper);
+    }
+
+    /**
+     * Parse and build common GDB model with the custom label mapper.
+     *
+     * @param pathToMdl Path to model.
+     * @param pathToMdlMetaData Path to model meta data.
+     * @param lbMapper Label mapper.
+     */
+    @Nullable private static Model parseAndBuildGDBModel(String pathToMdl, String pathToMdlMetaData,
+        IgniteFunction<Double, Double> lbMapper) {
         double[] treeWeights = null;
         final Map<Integer, Double> treeWeightsByTreeID = new HashMap<>();
 
-        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(ignitePathToMdlMetaData), new Configuration()))) {
+        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdlMetaData), new Configuration()))) {
             PageReadStore pagesMetaData;
             final MessageType schema = r.getFooter().getFileMetaData().getSchema();
             final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
@@ -219,7 +248,7 @@ public class SparkModelParser {
             }
         }
         catch (IOException e) {
-            System.out.println("Error reading parquet file with MetaData by the path: " + ignitePathToMdlMetaData);
+            System.out.println("Error reading parquet file with MetaData by the path: " + pathToMdlMetaData);
             e.printStackTrace();
         }
 
@@ -255,7 +284,7 @@ public class SparkModelParser {
 
             final List<IgniteModel<Vector, Double>> models = new ArrayList<>();
             nodesByTreeId.forEach((key, nodes) -> models.add(buildDecisionTreeModel(nodes)));
-            IgniteFunction<Double, Double> lbMapper = lb -> lb > 0.5 ? 1.0 : 0.0;
+
             return new GDBTrainer.GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper);
         }
         catch (IOException e) {
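
The refactoring above factors the shared Parquet reading into parseAndBuildGDBModel(..) so that loadGBTRegressionModel and loadGBTClassifierModel differ only in the label mapper passed to GDBTrainer.GDBModel. A minimal, self-contained sketch of that pattern using plain JDK types (toy trees and weights for illustration, not Ignite's actual GDBModel implementation):

import java.util.Arrays;
import java.util.List;
import java.util.function.DoubleUnaryOperator;

public class LabelMapperSketch {
    /** Raw ensemble score: weighted sum of the individual tree predictions. */
    static double rawScore(List<DoubleUnaryOperator> trees, double[] weights, double x) {
        double sum = 0.0;
        for (int i = 0; i < trees.size(); i++)
            sum += weights[i] * trees.get(i).applyAsDouble(x);
        return sum;
    }

    public static void main(String[] args) {
        // Two toy "trees" and their boosting weights.
        List<DoubleUnaryOperator> trees = Arrays.asList(x -> 0.4 * x, x -> x - 2.0);
        double[] weights = {1.0, 0.1};

        double raw = rawScore(trees, weights, 3.0);

        // Regression (loadGBTRegressionModel): identity mapper, the raw score is the prediction.
        DoubleUnaryOperator regression = lb -> lb;
        // Binary classification (loadGBTClassifierModel): threshold the raw score at 0.5.
        DoubleUnaryOperator classification = lb -> lb > 0.5 ? 1.0 : 0.0;

        System.out.println("regression prediction:     " + regression.applyAsDouble(raw));
        System.out.println("classification prediction: " + classification.applyAsDouble(raw));
    }
}
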
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
index 26d0394..fa4b8e1 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
@@ -47,6 +47,9 @@ public enum SupportedSparkModels {
     /** Random forest regression. */
     RANDOM_FOREST_REGRESSION,
 
+    /** Gradient boosted trees regression. */
+    GRADIENT_BOOSTED_TREES_REGRESSION,
+
     /**
      * Gradient boosted trees.
      * NOTE: support binary classification only with raw labels 0 and 1