Posted to commits@ignite.apache.org by ch...@apache.org on 2019/01/24 15:25:44 UTC

[ignite] branch master updated: IGNITE-11002: [ML] Add parser for Spark Decision tree classifier model

This is an automated email from the ASF dual-hosted git repository.

chief pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ignite.git


The following commit(s) were added to refs/heads/master by this push:
     new 7c369fe  IGNITE-11002: [ML] Add parser for Spark Decision tree classifier model
7c369fe is described below

commit 7c369fef0211ecfadfbca5361d95b010b29da01a
Author: zaleslaw <za...@gmail.com>
AuthorDate: Thu Jan 24 18:25:24 2019 +0300

    IGNITE-11002: [ML] Add parser for Spark Decision tree classifier model
    
    This closes #5909
---
 .../modelparser/DecisionTreeFromSparkExample.java  |  85 +++++++++++
 .../models/spark/serialized/dt/data/._SUCCESS.crc  | Bin 0 -> 8 bytes
 ...-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet.crc | Bin 0 -> 44 bytes
 .../models/spark/serialized/dt/data/_SUCCESS       |   0
 ...df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet | Bin 0 -> 4193 bytes
 .../spark/serialized/dt/metadata/._SUCCESS.crc     | Bin 0 -> 8 bytes
 .../spark/serialized/dt/metadata/.part-00000.crc   | Bin 0 -> 12 bytes
 .../models/spark/serialized/dt/metadata/_SUCCESS   |   0
 .../models/spark/serialized/dt/metadata/part-00000 |   1 +
 .../ml/sparkmodelparser/SparkModelParser.java      | 155 +++++++++++++++++++++
 .../ml/sparkmodelparser/SupportedSparkModels.java  |   3 +
 11 files changed, 244 insertions(+)

diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java
new file mode 100644
index 0000000..f220271
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/DecisionTreeFromSparkExample.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.inference.spark.modelparser;
+
+import java.io.FileNotFoundException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.examples.ml.tutorial.TitanicUtils;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.selection.scoring.evaluator.BinaryClassificationEvaluator;
+import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
+import org.apache.ignite.ml.sparkmodelparser.SparkModelParser;
+import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels;
+import org.apache.ignite.ml.tree.DecisionTreeNode;
+
+/**
+ * Run the Decision Tree model loaded from a snappy.parquet file.
+ * The snappy.parquet file was generated by the Spark MLlib model.write().overwrite().save(..) operator.
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class DecisionTreeFromSparkExample {
+    /** Path to Spark DT model. */
+    public static final String SPARK_MDL_PATH = "examples/src/main/resources/models/spark/serialized/dt/data" +
+        "/part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet";
+
+    /** Run example. */
+    public static void main(String[] args) throws FileNotFoundException {
+        System.out.println();
+        System.out.println(">>> Decision Tree model loaded from Spark through serialization over partitioned dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
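+            // Build a three-feature vector from numeric Titanic columns; NaN values are replaced with 0.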
+            IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> {
+                double[] data = new double[] {(double)v[0], (double)v[5], (double)v[6]};
+                data[0] = Double.isNaN(data[0]) ? 0 : data[0];
+                data[1] = Double.isNaN(data[1]) ? 0 : data[1];
+                data[2] = Double.isNaN(data[2]) ? 0 : data[2];
+
+                return VectorUtils.of(data);
+            };
+
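+            // Column 1 ('survived', matching labelCol in the model metadata below) is used as the label.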
+            IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double)v[1];
+
+            DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse(
+                SPARK_MDL_PATH,
+                SupportedSparkModels.DECISION_TREE
+            );
+
+            System.out.println(">>> DT: " + mdl);
+
+            double accuracy = BinaryClassificationEvaluator.evaluate(
+                dataCache,
+                mdl,
+                featureExtractor,
+                lbExtractor,
+                new Accuracy<>()
+            );
+
+            System.out.println("\n>>> Accuracy " + accuracy);
+            System.out.println("\n>>> Test Error " + (1 - accuracy));
+        }
+    }
+}
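
Beyond the evaluator-based flow above, the parsed model can also be applied to a single
feature vector. A minimal sketch, assuming the Model#apply contract of this codebase and
the same three-feature layout the featureExtractor builds (the concrete values are
illustrative only):

    // Parse the serialized Spark model into an Ignite decision tree.
    DecisionTreeNode mdl = (DecisionTreeNode)SparkModelParser.parse(
        SPARK_MDL_PATH,
        SupportedSparkModels.DECISION_TREE
    );

    // Hypothetical sample: three numeric features in the order v[0], v[5], v[6]
    // used by the featureExtractor above (NaNs would likewise be replaced with 0).
    Vector features = VectorUtils.of(1.0, 29.0, 100.0);
    double prediction = mdl.apply(features);
    System.out.println(">>> Prediction for the sample vector: " + prediction);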
diff --git a/examples/src/main/resources/models/spark/serialized/dt/data/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/dt/data/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/dt/data/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/dt/data/.part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/dt/data/.part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet.crc
new file mode 100644
index 0000000..81479fa
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/dt/data/.part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/dt/data/_SUCCESS b/examples/src/main/resources/models/spark/serialized/dt/data/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/dt/data/part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/dt/data/part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet
new file mode 100644
index 0000000..1d3d04e
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/dt/data/part-00000-86bc0f70-df49-48b3-8356-9a26f9a6eb0f-c000.snappy.parquet differ
diff --git a/examples/src/main/resources/models/spark/serialized/dt/metadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/dt/metadata/._SUCCESS.crc
new file mode 100644
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/dt/metadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/dt/metadata/.part-00000.crc b/examples/src/main/resources/models/spark/serialized/dt/metadata/.part-00000.crc
new file mode 100644
index 0000000..cdb7c30
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/dt/metadata/.part-00000.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/dt/metadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/dt/metadata/_SUCCESS
new file mode 100644
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/dt/metadata/part-00000 b/examples/src/main/resources/models/spark/serialized/dt/metadata/part-00000
new file mode 100644
index 0000000..b27a036
--- /dev/null
+++ b/examples/src/main/resources/models/spark/serialized/dt/metadata/part-00000
@@ -0,0 +1 @@
+{"class":"org.apache.spark.ml.classification.DecisionTreeClassificationModel","timestamp":1548167959360,"sparkVersion":"2.2.0","uid":"dtc_267ccbcc0d51","paramMap":{"impurity":"gini","maxDepth":5,"cacheNodeIds":false,"seed":159147643,"maxBins":32,"checkpointInterval":10,"labelCol":"survived","rawPredictionCol":"rawPrediction","minInstancesPerNode":1,"featuresCol":"features","maxMemoryInMB":256,"probabilityCol":"probability","minInfoGain":0.0,"predictionCol":"prediction"},"numFeatures":3," [...]
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
index 9a869e8..8156810 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
@@ -19,6 +19,9 @@ package org.apache.ignite.ml.sparkmodelparser;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.ignite.internal.util.IgniteUtils;
@@ -28,6 +31,9 @@ import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionModel;
 import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel;
 import org.apache.ignite.ml.svm.SVMLinearClassificationModel;
+import org.apache.ignite.ml.tree.DecisionTreeConditionalNode;
+import org.apache.ignite.ml.tree.DecisionTreeLeafNode;
+import org.apache.ignite.ml.tree.DecisionTreeNode;
 import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.simple.SimpleGroup;
@@ -38,6 +44,8 @@ import org.apache.parquet.io.ColumnIOFactory;
 import org.apache.parquet.io.MessageColumnIO;
 import org.apache.parquet.io.RecordReader;
 import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.Type;
+import org.jetbrains.annotations.NotNull;
 
 /** Parser of Spark models. */
 public class SparkModelParser {
@@ -62,12 +70,121 @@ public class SparkModelParser {
                 return loadLinRegModel(ignitePathToMdl);
             case LINEAR_SVM:
                 return loadLinearSVMModel(ignitePathToMdl);
+            case DECISION_TREE:
+                return loadDecisionTreeModel(ignitePathToMdl);
             default:
                 throw new UnsupportedSparkModelException(ignitePathToMdl);
         }
     }
 
     /**
+     * Load Decision Tree model.
+     *
+     * @param pathToMdl Path to model.
+     */
+    private static Model loadDecisionTreeModel(String pathToMdl) {
+        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
+            PageReadStore pages;
+            final MessageType schema = r.getFooter().getFileMetaData().getSchema();
+            final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
+            final Map<Integer, NodeData> nodes = new TreeMap<>();
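+            // Each parquet row describes one tree node; collect them into the TreeMap, sorted by node id.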
+            while (null != (pages = r.readNextRowGroup())) {
+                final long rows = pages.getRowCount();
+                final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
+                for (int i = 0; i < rows; i++) {
+                    final SimpleGroup g = (SimpleGroup)recordReader.read();
+                    NodeData nodeData = extractNodeDataFromParquetRow(g);
+                    nodes.put(nodeData.id, nodeData);
+                }
+            }
+            return buildDecisionTreeModel(nodes);
+        }
+        catch (IOException e) {
+            System.out.println("Error reading parquet file.");
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    /**
+     * Builds the DT model from the given sorted map of nodes.
+     *
+     * @param nodes The sorted map of nodes.
+     */
+    private static Model buildDecisionTreeModel(Map<Integer, NodeData> nodes) {
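+        // The map is sorted by node id; the first entry is assumed to be the tree root.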
+        if (nodes.isEmpty())
+            return null;
+
+        NodeData rootNodeData = ((NavigableMap<Integer, NodeData>)nodes).firstEntry().getValue();
+        return buildTree(nodes, rootNodeData);
+    }
+
+    /**
+     * Builds a tree or sub-tree based on node indices, using the sorted map of nodes as a dictionary.
+     *
+     * @param nodes The sorted map of nodes.
+     * @param rootNodeData Root node data.
+     */
+    @NotNull private static DecisionTreeNode buildTree(Map<Integer, NodeData> nodes,
+        NodeData rootNodeData) {
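+        // Leaf nodes carry only a prediction; inner nodes recurse into both children, looked up by id in the map.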
+        if (rootNodeData.isLeafNode)
+            return new DecisionTreeLeafNode(rootNodeData.prediction);
+
+        return new DecisionTreeConditionalNode(rootNodeData.featureIdx,
+            rootNodeData.threshold,
+            buildTree(nodes, nodes.get(rootNodeData.rightChildId)),
+            buildTree(nodes, nodes.get(rootNodeData.leftChildId)),
+            null);
+    }
+
+    /**
+     * Forms the node data according to the data in the parquet row.
+     *
+     * @param g The given group presenting the node data from Spark DT model.
+     */
+    @NotNull private static SparkModelParser.NodeData extractNodeDataFromParquetRow(SimpleGroup g) {
+        NodeData nodeData = new NodeData();
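+        // Field positions follow the layout of Spark's serialized DecisionTreeClassificationModel rows:
+        // 0 = id, 1 = prediction, 5 = left child id, 6 = right child id, 7 = split (feature index and threshold).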
+        nodeData.id = g.getInteger(0, 0);
+        nodeData.prediction = g.getDouble(1, 0);
+        nodeData.leftChildId = g.getInteger(5, 0);
+        nodeData.rightChildId = g.getInteger(6, 0);
+
+        if (nodeData.leftChildId == -1 && nodeData.rightChildId == -1) {
+            nodeData.featureIdx = -1;
+            nodeData.threshold = -1;
+            nodeData.isLeafNode = true;
+        }
+        else {
+            final SimpleGroup splitGrp = (SimpleGroup)g.getGroup(7, 0);
+            nodeData.featureIdx = splitGrp.getInteger(0, 0);
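+            // For a continuous split the threshold is stored as the single value of a nested array group.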
+            nodeData.threshold = splitGrp.getGroup(1, 0).getGroup(0, 0).getDouble(0, 0);
+        }
+        return nodeData;
+    }
+
+    /**
+     * Prints the given group from a row of the Parquet file.
+     *
+     * @param g The given group.
+     */
+    private static void printGroup(Group g) {
+        int fieldCnt = g.getType().getFieldCount();
+        for (int field = 0; field < fieldCnt; field++) {
+            int valCnt = g.getFieldRepetitionCount(field);
+
+            Type fieldType = g.getType().getType(field);
+            String fieldName = fieldType.getName();
+
+            for (int idx = 0; idx < valCnt; idx++) {
+                if (fieldType.isPrimitive())
+                    System.out.println(fieldName + " " + g.getValueToString(field, idx));
+                else
+                    printGroup(g.getGroup(field, idx));
+            }
+        }
+        System.out.println();
+    }
+
+    /**
      * Load SVM model.
      *
      * @param pathToMdl Path to model.
@@ -258,4 +375,42 @@ public class SparkModelParser {
         }
         return coefficients;
     }
+
+    /**
+     * Presents the data from one parquet row of the Spark DT model.
+     */
+    private static class NodeData {
+        /** Id. */
+        int id;
+
+        /** Prediction. */
+        double prediction;
+
+        /** Left child id. */
+        int leftChildId;
+
+        /** Right child id. */
+        int rightChildId;
+
+        /** Threshold. */
+        double threshold;
+
+        /** Feature index. */
+        int featureIdx;
+
+        /** Is leaf node. */
+        boolean isLeafNode;
+
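+        /** {@inheritDoc} */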
+        @Override public String toString() {
+            return "NodeData{" +
+                "id=" + id +
+                ", prediction=" + prediction +
+                ", leftChildId=" + leftChildId +
+                ", rightChildId=" + rightChildId +
+                ", threshold=" + threshold +
+                ", featureIdx=" + featureIdx +
+                ", isLeafNode=" + isLeafNode +
+                '}';
+        }
+    }
 }
diff --git a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
index c47ca01..2064a0c 100644
--- a/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
+++ b/modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java
@@ -29,6 +29,9 @@ public enum SupportedSparkModels {
     /** Linear regression. */
     LINEAR_REGRESSION,
 
+    /** Decision tree. */
+    DECISION_TREE,
+
     /** Support Vector Machine. */
     LINEAR_SVM
 }