You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ignite.apache.org by ch...@apache.org on 2019/01/28 09:41:19 UTC

[ignite] branch master updated: IGNITE-11005: [ML] Add parser for Spark Gradient-boosted tree classifier

This is an automated email from the ASF dual-hosted git repository.

chief pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ignite.git


The following commit(s) were added to refs/heads/master by this push:
     new f73878d  IGNITE-11005: [ML] Add parser for Spark Gradient-boosted tree classifier
f73878d is described below

commit f73878de66ef383e517d5b9d0bfe7efbc26b0c56
Author: zaleslaw <za...@gmail.com>
AuthorDate: Mon Jan 28 12:41:00 2019 +0300

    IGNITE-11005: [ML] Add parser for Spark Gradient-boosted tree classifier
    
    This closes #5940
---
 .../spark/modelparser/GBTFromSparkExample.java     |  89 +++++++++++++++++
 .../models/spark/serialized/gbt/data/._SUCCESS.crc | Bin 0 -> 8 bytes
 ...-4b1f-9716-fbedf7caba2d-c000.snappy.parquet.crc | Bin 0 -> 288 bytes
 .../models/spark/serialized/gbt/data/_SUCCESS      |   0
 ...6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet | Bin 0 -> 35369 bytes
 .../spark/serialized/gbt/metadata/._SUCCESS.crc    | Bin 0 -> 8 bytes
 .../spark/serialized/gbt/metadata/.part-00000.crc  | Bin 0 -> 16 bytes
 .../models/spark/serialized/gbt/metadata/_SUCCESS  |   0
 .../spark/serialized/gbt/metadata/part-00000       |   1 +
 .../serialized/gbt/treesMetadata/._SUCCESS.crc     | Bin 0 -> 8 bytes
 ...-4d24-9900-be8a4396710b-c000.snappy.parquet.crc | Bin 0 -> 36 bytes
 .../spark/serialized/gbt/treesMetadata/_SUCCESS    |   0
 ...e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet | Bin 0 -> 3370 bytes
 .../ml/sparkmodelparser/SparkModelParser.java      | 109 ++++++++++++++++++++-
 .../ml/sparkmodelparser/SupportedSparkModels.java  |   8 +-
 15 files changed, 205 insertions(+), 2 deletions(-)

diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTFromSparkExample.java
new file mode 100644
index 0000000..c62e839
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/GBTFromSparkExample.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.inference.spark.modelparser;
+
+import java.io.FileNotFoundException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.examples.ml.tutorial.TitanicUtils;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.selection.scoring.evaluator.BinaryClassificationEvaluator;
+import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
+import org.apache.ignite.ml.sparkmodelparser.SparkModelParser;
+import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels;
+
+/**
+ * Run Gradient Boosted trees model loaded from snappy.parquet file.
+ * The snappy.parquet file was generated by Spark MLlib model.write.overwrite().save(..) operator.
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class GBTFromSparkExample {
+    /** Path to the Spark GBT model data (the serialized trees parquet file). */
+    public static final String SPARK_MDL_PATH = "examples/src/main/resources/models/spark/serialized/gbt/data" +
+        "/part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet";
+
+    /** Path to the Spark GBT model trees metadata (per-tree weights parquet file). */
+    private static final String SPARK_MDL_METADATA_PATH = "examples/src/main/resources/models/spark/serialized/gbt/treesMetadata" +
+        "/part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet";
+
+    /** Run example. */
+    public static void main(String[] args) throws FileNotFoundException {
+        System.out.println();
+        System.out.println(">>> Gradient Boosted trees model loaded from Spark through serialization over partitioned dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
+            // Features are taken from columns 0, 5 and 6 of the Titanic dataset rows;
+            // NaN values are replaced with 0 to keep the vectors dense and finite.
+            IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> {
+                double[] data = new double[] {(double)v[0], (double)v[5], (double)v[6]};
+                data[0] = Double.isNaN(data[0]) ? 0 : data[0];
+                data[1] = Double.isNaN(data[1]) ? 0 : data[1];
+                data[2] = Double.isNaN(data[2]) ? 0 : data[2];
+
+                return VectorUtils.of(data);
+            };
+
+            // The binary label (survived) is taken from column 1.
+            IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double)v[1];
+
+            // Parse the Spark-serialized GBT model together with its trees metadata (per-tree weights).
+            ModelsComposition mdl = (ModelsComposition)SparkModelParser.parseWithMetadata(
+                SPARK_MDL_PATH, SPARK_MDL_METADATA_PATH,
+                SupportedSparkModels.GRADIENT_BOOSTED_TREES
+            );
+
+            System.out.println(">>> GBT: " + mdl.toString(true));
+
+            double accuracy = BinaryClassificationEvaluator.evaluate(
+                dataCache,
+                mdl,
+                featureExtractor,
+                lbExtractor,
+                new Accuracy<>()
+            );
+
+            System.out.println("\n>>> Accuracy " + accuracy);
+            System.out.println("\n>>> Test Error " + (1 - accuracy));
+        }
+    }
+}
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/data/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbt/data/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/data/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/data/.part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/gbt/data/.part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet.crc
new file mode 100644
index 0000000..ee329e9
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/data/.part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/data/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbt/data/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/data/part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/gbt/data/part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet
new file mode 100644
index 0000000..9fb2aa0
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/data/part-00000-ea23dcda-6344-4b1f-9716-fbedf7caba2d-c000.snappy.parquet differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/metadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbt/metadata/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/metadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/metadata/.part-00000.crc b/examples/src/main/resources/models/spark/serialized/gbt/metadata/.part-00000.crc
new file mode 100644
index 0000000..b30ab3d
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/metadata/.part-00000.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/metadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbt/metadata/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/metadata/part-00000 b/examples/src/main/resources/models/spark/serialized/gbt/metadata/part-00000
new file mode 100644
index 0000000..097b5d9
--- /dev/null
+++ b/examples/src/main/resources/models/spark/serialized/gbt/metadata/part-00000
@@ -0,0 +1 @@
+{"class":"org.apache.spark.ml.classification.GBTClassificationModel","timestamp":1548233686134,"sparkVersion":"2.2.0","uid":"gbtc_bc5b0cce5380","paramMap":{"maxDepth":7,"impurity":"gini","subsamplingRate":1.0,"maxMemoryInMB":256,"seed":-1287390502,"cacheNodeIds":false,"labelCol":"survived","maxIter":10,"minInstancesPerNode":1,"predictionCol":"prediction","stepSize":0.1,"probabilityCol":"probability","featuresCol":"features","lossType":"logistic","rawPredictionCol":"rawPrediction","maxBin [...]
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/.part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/.part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet.crc
new file mode 100644
index 0000000..1f162ed
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/.part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet
new file mode 100644
index 0000000..35e68c5
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/gbt/treesMetadata/part-00000-9033203a-e1e6-4d24-9900-be8a4396710b-c000.snappy.parquet differ
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
index e329233..31fbed4 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
@@ -20,6 +20,7 @@ package org.apache.ignite.ml.sparkmodelparser;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -29,8 +30,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.ignite.internal.util.IgniteUtils;
 import org.apache.ignite.ml.IgniteModel;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.GDBTrainer;
 import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator;
+import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
 import org.apache.ignite.ml.inference.Model;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionModel;
@@ -85,7 +89,110 @@ public class SparkModelParser {
     }
 
     /**
-     * Load Random Forest model.
+     * Load model and its metadata from parquet files.
+     *
+     * @param pathToMdl Hadoop path to model saved from Spark.
+     * @param pathToMetaData Hadoop path to metadata saved from Spark.
+     * @param parsedSparkMdl One of supported Spark models to parse it.
+     * @return Instance of parsedSparkMdl model.
+     */
+    public static Model parseWithMetadata(String pathToMdl, String pathToMetaData,
+        SupportedSparkModels parsedSparkMdl) {
+        // Resolve both paths relative to the Ignite home directory; fail fast if either is missing.
+        File mdlRsrc1 = IgniteUtils.resolveIgnitePath(pathToMdl);
+        if (mdlRsrc1 == null)
+            throw new IllegalArgumentException("Resource not found [resource_path=" + pathToMdl + "]");
+
+        String ignitePathToMdl = mdlRsrc1.getPath();
+
+        File mdlRsrc2 = IgniteUtils.resolveIgnitePath(pathToMetaData);
+        if (mdlRsrc2 == null)
+            throw new IllegalArgumentException("Resource not found [resource_path=" + pathToMetaData + "]");
+
+        String ignitePathToMdlMetaData = mdlRsrc2.getPath();
+
+        // Only GBT needs the extra trees-metadata file; all other supported models are rejected here.
+        switch (parsedSparkMdl) {
+            case GRADIENT_BOOSTED_TREES:
+                return loadGBTClassifierModel(ignitePathToMdl, ignitePathToMdlMetaData);
+            default:
+                // NOTE(review): the exception is constructed with the model path, not the model kind;
+                // parsedSparkMdl.name() would likely produce a clearer message — confirm intent.
+                throw new UnsupportedSparkModelException(ignitePathToMdl);
+        }
+    }
+
+    /**
+     * Load GBT model.
+     *
+     * @param pathToMdl Path to model.
+     * @param ignitePathToMdlMetaData Ignite path to model meta data.
+     * @return GDB model composition, or {@code null} if the model parquet file could not be read.
+     */
+    private static Model loadGBTClassifierModel(String pathToMdl, String ignitePathToMdlMetaData) {
+        double[] treeWeights = null;
+        final Map<Integer, Double> treeWeightsByTreeID = new HashMap<>();
+
+        // Pass 1: read per-tree weights from the treesMetadata parquet file.
+        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(ignitePathToMdlMetaData), new Configuration()))) {
+            PageReadStore pagesMetaData;
+            final MessageType schema = r.getFooter().getFileMetaData().getSchema();
+            final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
+
+            while (null != (pagesMetaData = r.readNextRowGroup())) {
+                final long rows = pagesMetaData.getRowCount();
+                final RecordReader recordReader = colIO.getRecordReader(pagesMetaData, new GroupRecordConverter(schema));
+                for (int i = 0; i < rows; i++) {
+                    // Metadata row layout: column 0 = tree id, column 2 = tree weight.
+                    final SimpleGroup g = (SimpleGroup)recordReader.read();
+                    int treeId = g.getInteger(0, 0);
+                    double treeWeight = g.getDouble(2, 0);
+                    treeWeightsByTreeID.put(treeId, treeWeight);
+                }
+            }
+        }
+        catch (IOException e) {
+            System.out.println("Error reading parquet file with MetaData by the path: " + ignitePathToMdlMetaData);
+            e.printStackTrace();
+        }
+
+        // NOTE(review): assumes tree ids are contiguous 0..n-1; if any id is absent (or the
+        // metadata read above failed) get(i) returns null and unboxing throws an NPE here.
+        treeWeights = new double[treeWeightsByTreeID.size()];
+        for (int i = 0; i < treeWeights.length; i++)
+            treeWeights[i] = treeWeightsByTreeID.get(i);
+
+        // Pass 2: read the tree nodes themselves and group them by tree id.
+        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
+            PageReadStore pages;
+            final MessageType schema = r.getFooter().getFileMetaData().getSchema();
+            final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
+            final Map<Integer, TreeMap<Integer, NodeData>> nodesByTreeId = new TreeMap<>();
+            while (null != (pages = r.readNextRowGroup())) {
+                final long rows = pages.getRowCount();
+                final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
+                for (int i = 0; i < rows; i++) {
+                    // Model row layout: column 0 = tree id, column 1 = nested node-data group.
+                    final SimpleGroup g = (SimpleGroup)recordReader.read();
+                    final int treeID = g.getInteger(0, 0);
+                    final SimpleGroup nodeDataGroup = (SimpleGroup)g.getGroup(1, 0);
+                    NodeData nodeData = extractNodeDataFromParquetRow(nodeDataGroup);
+
+                    if (nodesByTreeId.containsKey(treeID)) {
+                        Map<Integer, NodeData> nodesByNodeId = nodesByTreeId.get(treeID);
+                        nodesByNodeId.put(nodeData.id, nodeData);
+                    }
+                    else {
+                        TreeMap<Integer, NodeData> nodesByNodeId = new TreeMap<>();
+                        nodesByNodeId.put(nodeData.id, nodeData);
+                        nodesByTreeId.put(treeID, nodesByNodeId);
+                    }
+                }
+            }
+
+            // Build one decision tree per tree id, aggregate with the metadata weights,
+            // and map the raw ensemble output to binary labels 0/1 with a 0.5 threshold.
+            final List<IgniteModel<Vector, Double>> models = new ArrayList<>();
+            nodesByTreeId.forEach((key, nodes) -> models.add(buildDecisionTreeModel(nodes)));
+            IgniteFunction<Double, Double> lbMapper = lb -> lb > 0.5 ? 1.0 : 0.0;
+            return new GDBTrainer.GDBModel(models, new WeightedPredictionsAggregator(treeWeights), lbMapper);
+        }
+        catch (IOException e) {
+            System.out.println("Error reading parquet file.");
+            e.printStackTrace();
+        }
+        // NOTE(review): on read failure the error is only printed to stdout and null is returned;
+        // callers casting the result (see GBTFromSparkExample) will then fail with an NPE — consider rethrowing.
+        return null;
+    }
+
+    /**
+     * Load RF model.
      *
      * @param pathToMdl Path to model.
      */
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
index 0c32d8e..9d9a9e9 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
@@ -36,5 +36,11 @@ public enum SupportedSparkModels {
     LINEAR_SVM,
 
     /** Random forest. */
-    RANDOM_FOREST
+    RANDOM_FOREST,
+
+    /**
+     * Gradient boosted trees.
+     * NOTE: supports binary classification only, with raw labels 0 and 1.
+     */
+    GRADIENT_BOOSTED_TREES
 }