You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@madlib.apache.org by ri...@apache.org on 2016/03/30 02:58:54 UTC

[14/51] [partial] incubator-madlib-site git commit: Add all files from old site (madlib.net)

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__decision__tree.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__decision__tree.html b/docs/latest/group__grp__decision__tree.html
new file mode 100644
index 0000000..e5d6ce8
--- /dev/null
+++ b/docs/latest/group__grp__decision__tree.html
@@ -0,0 +1,694 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Decision Tree</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__decision__tree.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Decision Tree<div class="ingroups"><a class="el" href="group__grp__super.html">Supervised Learning</a> &raquo; <a class="el" href="group__grp__tree.html">Tree Methods</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><p><b>Contents</b></p><ul>
+<li class="level1">
+<a href="#train">Training Function</a> </li>
+<li class="level1">
+<a href="#predict">Prediction Function</a> </li>
+<li class="level1">
+<a href="#display">Display Function</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>Decision trees are a supervised learning method that uses a predictive model to predict the value of a target variable, based on several input variables. They use a tree-based representation of the model such that, the interior nodes of the tree correspond to the input variables, the edges of the nodes correspond to values that the input variables can take, and leaf nodes represent values of the target variable, given the values of the input variables, represented by the path from the root to the leaf nodes.</p>
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd>We implement the decision tree using the CART algorithm, introduced by Breiman et al. [1]. The training function has the following syntax: <pre class="syntax">
+tree_train(
+    training_table_name,
+    output_table_name,
+    id_col_name,
+    dependent_variable,
+    list_of_features,
+    list_of_features_to_exclude,
+    split_criterion,
+    grouping_cols,
+    weights,
+    max_depth,
+    min_split,
+    min_bucket,
+    num_splits,
+    pruning_params,
+    surrogate_params,
+    verbosity
+    )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>training_table_name </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the training data</p>
+<p class="enddd"></p>
+</dd>
+<dt>output_table_name </dt>
+<dd><p class="startdd">TEXT. The name of the generated table containing the model. If a table with the same name already exists, then the function will return an error.</p>
+<p>The model table produced by the train function contains the following columns:</p>
+<table  class="output">
+<tr>
+<th>&lt;...&gt; </th><td>Grouping columns, if provided in input, same types as in the training table. This could be multiple columns depending on the <code>grouping_cols</code> input.  </td></tr>
+<tr>
+<th>tree </th><td>BYTEA8. Trained decision tree model stored in a binary format.  </td></tr>
+<tr>
+<th>cat_levels_in_text </th><td>TEXT[]. Ordered levels of categorical variables  </td></tr>
+<tr>
+<th>cat_n_levels </th><td><p class="starttd">INTEGER[]. Number of levels for each categorical variable </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>tree_depth </th><td><p class="starttd">INTEGER. The maximum depth of the tree obtained after training (root has depth 0) </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>pruning_cp </th><td><p class="starttd">DOUBLE PRECISION. The cost-complexity parameter used for pruning the trained tree(s). This would be different from the input cp value if cross-validation is used.  </p>
+<p class="endtd"></p>
+</td></tr>
+</table>
+<p>A summary table named <em>&lt;model_table&gt;_summary</em> is also created at the same time, which has the following columns: </p><table  class="output">
+<tr>
+<th>method </th><td><p class="starttd">TEXT. 'tree_train' </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>is_classification </th><td><p class="starttd">BOOLEAN. TRUE if the decision trees are for classification, FALSE if regression </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>source_table </th><td><p class="starttd">TEXT. The data source table name </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>model_table </th><td><p class="starttd">TEXT. The model table name </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>id_col_name </th><td><p class="starttd">TEXT. The ID column name </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>dependent_varname </th><td><p class="starttd">TEXT. The dependent variable </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>independent_varname </th><td><p class="starttd">TEXT. The independent variables </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>cat_features </th><td>TEXT. The list of categorical feature names as a comma-separated string  </td></tr>
+<tr>
+<th>con_features </th><td><p class="starttd">TEXT. The list of continuous feature names as a comma-separated string </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>grouping_col </th><td><p class="starttd">TEXT. Names of grouping columns </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_all_groups </th><td><p class="starttd">INTEGER. Number of groups in decision tree training </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_failed_groups </th><td><p class="starttd">INTEGER. Number of failed groups in decision tree training </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>total_rows_processed </th><td><p class="starttd">BIGINT. Total numbers of rows processed in all groups </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>total_rows_skipped </th><td><p class="starttd">BIGINT. Total numbers of rows skipped in all groups due to missing values or failures </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>dependent_var_levels </th><td><p class="starttd">TEXT. For classification, the distinct levels of the dependent variable </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>dependent_var_type </th><td><p class="starttd">TEXT. The type of dependent variable </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>input_cp </th><td><p class="starttd">DOUBLE PRECISION. The complexity parameter (cp) used for pruning the trained tree(s) (before cross-validation is run). This is the same as the cp value input through the <em>pruning_params</em> </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>independent_var_types </th><td><p class="starttd">TEXT. A comma separated string, the types of independent variables </p>
+<p class="endtd"></p>
+</td></tr>
+</table>
+<p class="enddd"></p>
+</dd>
+<dt>id_col_name </dt>
+<dd><p class="startdd">TEXT. Name of the column containing id information in the training data. This is a mandatory argument and is used for prediction and cross-validation. The values are expected to be unique for each row </p>
+<p class="enddd"></p>
+</dd>
+<dt>dependent_variable </dt>
+<dd><p class="startdd">TEXT. Name of the column that contains the output (response) for training. Boolean, integer and text types are considered classification outputs, while double precision values are considered regression outputs. The response variable for a classification tree can be multinomial, but the time and space complexity of the train function increases linearly as the number of response classes increases.</p>
+<p class="enddd"></p>
+</dd>
+<dt>list_of_features </dt>
+<dd><p class="startdd">TEXT. Comma-separated string of column names to use as predictors. Can also be a '*' implying all columns are to be used as predictors (except the ones included in the next argument). The types of the features can be mixed where boolean, integer, and text columns are considered categorical and double precision columns are considered continuous. The categorical variables are not encoded and used as is for the training.</p>
+<p>There are no limitations to the number of levels in a categorical variable. It is, however, important to note that we don't test for every combination of levels of a categorical variable for evaluating a split. We order the levels of the variable by the entropy of the variable in predicting the response. The splits at each node are evaluated between these ordered levels. </p>
+<p class="enddd"></p>
+</dd>
+<dt>list_of_features_to_exclude </dt>
+<dd><p class="startdd">TEXT. Comma-separated string of column names to exclude from the predictors list. If the <em>dependent_variable</em> is an expression (including cast of a column name), then this list should include all columns present in the <em>dependent_variable</em> expression, otherwise those columns will be included in the features. The names in this parameter should be identical to the names used in the table and quoted appropriately</p>
+<p class="enddd"></p>
+</dd>
+<dt>split_criterion </dt>
+<dd><p class="startdd">TEXT, default = 'gini' for classification, 'mse' for regression. Impurity function to compute the feature to use for the split. Supported criteria are 'gini', 'entropy', 'misclassification' for classification trees. For regression trees, split_criterion of 'mse' is always used (irrespective of the input for this argument) </p>
+<p class="enddd"></p>
+</dd>
+<dt>grouping_cols (optional) </dt>
+<dd><p class="startdd">TEXT, default: NULL. Comma-separated list of column names to group the data by. This will lead to creating multiple decision trees, one for each group</p>
+<p class="enddd"></p>
+</dd>
+<dt>weights (optional) </dt>
+<dd><p class="startdd">TEXT. Column name containing weights for each observation</p>
+<p class="enddd"></p>
+</dd>
+<dt>max_depth (optional) </dt>
+<dd><p class="startdd">INTEGER, default: 10. Maximum depth of any node of the final tree, with the root node counted as depth 0</p>
+<p class="enddd"></p>
+</dd>
+<dt>min_split (optional) </dt>
+<dd><p class="startdd">INTEGER, default: 20. Minimum number of observations that must exist in a node for a split to be attempted. The best value for this parameter depends on the number of tuples in the dataset</p>
+<p class="enddd"></p>
+</dd>
+<dt>min_bucket (optional) </dt>
+<dd><p class="startdd">INTEGER, default: min_split/3. Minimum number of observations in any terminal node. If only one of min_bucket or min_split is specified, min_split is set to min_bucket*3 or min_bucket to min_split/3, as appropriate</p>
+<p class="enddd"></p>
+</dd>
+<dt>num_splits (optional) </dt>
+<dd><p class="startdd">INTEGER, default: 100. Continuous-valued features are binned into discrete quantiles to compute split boundaries. This global parameter is used to compute the resolution of splits for continuous features. Higher number of bins will lead to better prediction, but will also result in higher processing time</p>
+<p class="enddd"></p>
+</dd>
+<dt>pruning_params (optional) </dt>
+<dd><p class="startdd">TEXT. Comma-separated string of key-value pairs giving the parameters for pruning the tree. The parameters currently accepted are: </p><table  class="output">
+<tr>
+<th>cp </th><td><p class="starttd">Default: 0. A split on a node is attempted only if it decreases the overall lack of fit by a factor of 'cp', else the split is pruned away. This value is used to create an initial tree before running cross-validation (see below).</p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>n_folds </th><td><p class="starttd">Default: 0 (i.e. No cross-validation). Number of cross-validation folds to use to compute the best value of <em>cp</em>. To perform cross-validation, a positive value of <em>n_folds</em> (greater than 2) should be given. An additional output table <em>&lt;model_table&gt;_cv</em> is created containing the values of evaluated <em>cp</em> and the cross-validation error. The tree returned in the output table corresponds to the <em>cp</em> with the lowest cross-validation error (we pick the maximum <em>cp</em> if multiple values have same error).</p>
+<p>The list of <em>cp</em> values are automatically computed by parsing through the tree initially trained on the complete dataset. The tree outputted is a subset of this initial tree corresponding to the best computed <em>cp</em>.</p>
+<p class="endtd"></p>
+</td></tr>
+</table>
+<p class="enddd"></p>
+</dd>
+<dt>surrogate_params </dt>
+<dd><p class="startdd">TEXT. Comma-separated string of key-value pairs controlling the behavior of surrogate splits for each node. A surrogate variable is another predictor variable that is associated (correlated) with the primary predictor variable for a split. The surrogate variable comes into use when the primary predictor value is NULL. This parameter currently accepts the following argument: </p><table  class="output">
+<tr>
+<th>max_surrogates </th><td>Default: 0. Number of surrogates to store for each node  </td></tr>
+</table>
+<p class="enddd"></p>
+</dd>
+<dt>verbosity (optional) </dt>
+<dd>BOOLEAN, default: FALSE. Provides verbose output of the results of training </dd>
+</dl>
+</dd></dl>
+<dl class="section note"><dt>Note</dt><dd><ul>
+<li>Many of the parameters are designed to be similar to the popular R package 'rpart'. An important distinction between rpart and the above MADlib function is that for both response and feature variables, MADlib considers integer values as categorical values, while rpart considers them as continuous.</li>
+<li>When using no surrogates (<em>max_surrogates</em>=0), all rows containing NULL value for any of the features used for training will be ignored from training and prediction.</li>
+<li>When cross-validation is not used (<em>n_folds</em>=0), each tree output is pruned by the input cost-complexity (<em>cp</em>). With cross-validation, the input <em>cp</em> is the minimum value of all the explored values of 'cp'. During cross-validation, we train an initial tree using the provided <em>cp</em> and explore all possible sub-trees (up to a single-node tree) to compute the optimal sub-tree. The optimal sub-tree and the 'cp' corresponding to this optimal sub-tree is placed in the <em>output_table</em>, with their columns named as <em>tree</em> and <em>pruning_cp</em> respectively.</li>
+</ul>
+</dd></dl>
+<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd>The prediction function is provided to estimate the conditional mean given a new predictor. It has the following syntax: <pre class="syntax">
+tree_predict(tree_model,
+             new_data_table,
+             output_table,
+             type)
+</pre></dd></dl>
+<p><b>Arguments</b> </p><dl class="arglist">
+<dt>tree_model </dt>
+<dd><p class="startdd">TEXT. Name of the table containing the decision tree model. This should be the output table returned from <em>tree_train</em></p>
+<p class="enddd"></p>
+</dd>
+<dt>new_data_table </dt>
+<dd><p class="startdd">TEXT. Name of the table containing prediction data. This table is expected to contain the same features that were used during training. The table should also contain <em>id_col_name</em> used for identifying each row</p>
+<p class="enddd"></p>
+</dd>
+<dt>output_table </dt>
+<dd><p class="startdd">TEXT. Name of the table to output prediction results to. If this table already exists then an error is returned. The table contains the <em>id_col_name</em> column giving the 'id' for each prediction and the prediction columns for the dependent variable.</p>
+<p>If <em>type</em> = 'response', then the table has a single additional column with the prediction value of the response. The type of this column depends on the type of the response variable used during training.</p>
+<p>If <em>type</em> = 'prob', then the table has multiple additional columns, one for each possible value of the response variable. The columns are labeled as 'estimated_prob_<em>dep_value</em>', where <em>dep_value</em> represents each value of the response</p>
+<p class="enddd"></p>
+</dd>
+<dt>type </dt>
+<dd>TEXT, optional, default: 'response'. For regression trees, the output is always the predicted value of the dependent variable. For classification trees, the <em>type</em> variable can be 'response', giving the classification prediction as output, or 'prob', giving the class probabilities as output. For each value of the dependent variable, a column with the probabilities is added to the output table  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>If the <em>new_data_table</em> contains categories of categorical variables not seen in the training data then the prediction for that row will be NULL.</dd></dl>
+<p><a class="anchor" id="display"></a></p><dl class="section user"><dt>Display Function</dt><dd>The display function is provided to output a graph representation of the decision tree. The output can either be in the popular 'dot' format that can be visualized using various programs including those in the GraphViz package, or in a simple text format. The details of the text format are output with the tree. <pre class="syntax">
+tree_display(tree_model, dot_format)
+</pre></dd></dl>
+<p>An additional display function is provided to output the surrogate splits chosen for each internal node. </p><pre class="syntax">
+tree_surr_display(tree_model)
+</pre><p>The output contains the list of surrogate splits for each internal node. The nodes are sorted in ascending order by id. This is equivalent to viewing the tree in a breadth-first manner. For each surrogate, we output the surrogate split (variable and threshold) and also give the number of rows that were common between the primary split and the surrogate split. Finally, the number of rows present in the majority branch of the primary split is also presented. Only surrogates that perform better than this majority branch are included in the surrogate list. When the primary variable has a NULL value the surrogate variables are used in order to compute the split for that node. If all surrogate variables are NULL, then the majority branch is used to compute the split for a tuple.</p>
+<p><b>Arguments</b> </p><dl class="arglist">
+<dt>tree_model_name </dt>
+<dd>TEXT. Name of the table containing the decision tree model </dd>
+<dt>dot_format </dt>
+<dd>BOOLEAN, default = TRUE. Output can either be in a dot format or a text format. If TRUE, the result is in the dot format, else output is in text format </dd>
+</dl>
+<p>The output is always returned as a 'TEXT'. For the dot format, the output can be redirected to a file on the client side and then rendered using visualization programs.</p>
+<p>If the user wants to export the dot format result to an external file, they can use the following method (Note: the user needs to use unaligned table output mode for psql with '-A' flag. And inside psql client, both '\t' and '\o' should be used):</p>
+<pre class="example">
+&gt; # under bash
+&gt; psql -A my_database
+# -- in psql now
+# \t
+# \o test.dot -- export to a file
+# select madlib.tree_display('tree_out');
+# \o
+# \t
+</pre><p>After the desired dot file has been generated, one can then use third-party plotting software to plot the trees in a nice figure: </p><pre class="example">
+&gt; # under bash, convert the dot file into a PDF file
+&gt; dot -Tpdf test.dot &gt; test.pdf
+&gt; xpdf test.pdf&amp;
+</pre><p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd>Decision tree classification example</dd></dl>
+<ol type="1">
+<li>Prepare input data. <pre class="example">
+DROP TABLE IF EXISTS dt_golf;
+CREATE TABLE dt_golf (
+    id integer NOT NULL,
+    "OUTLOOK" text,
+    temperature double precision,
+    humidity double precision,
+    windy text,
+    class text
+) ;
+</pre> <pre class="example">
+COPY dt_golf (id,"OUTLOOK",temperature,humidity,windy,class) FROM stdin WITH DELIMITER '|';
+1|sunny|85|85|'false'|'Don''t Play'
+2|sunny|80|90|'true'|'Don''t Play'
+3|overcast|83|78|'false'|'Play'
+4|rain|70|96|'false'|'Play'
+5|rain|68|80|'false'|'Play'
+6|rain|65|70|'true'|'Don''t Play'
+7|overcast|64|65|'true'|'Play'
+8|sunny|72|95|'false'|'Don''t Play'
+9|sunny|69|70|'false'|'Play'
+10|rain|75|80|'false'|'Play'
+11|sunny|75|70|'true'|'Play'
+12|overcast|72|90|'true'|'Play'
+13|overcast|81|75|'false'|'Play'
+14|rain|71|80|'true'|'Don''t Play'
+\.
+</pre></li>
+<li>Run Decision tree train function. <pre class="example">
+SELECT madlib.tree_train('dt_golf',         -- source table
+                         'train_output',    -- output model table
+                         'id',              -- id column
+                         'class',           -- response
+                         '"OUTLOOK", temperature, humidity, windy',   -- features
+                         NULL::text,        -- exclude columns
+                         'gini',            -- split criterion
+                         NULL::text,        -- no grouping
+                         NULL::text,        -- no weights
+                         5,                 -- max depth
+                         3,                 -- min split
+                         1,                 -- min bucket
+                         10                 -- number of bins per continuous variable
+                         );
+</pre></li>
+<li>Predict output categories for the same data as was used for input. <pre class="example">
+SELECT madlib.tree_predict('train_output',
+                           'dt_golf',
+                           'prediction_results',
+                           'response');
+SELECT * FROM prediction_results;
+</pre> Result: <pre class="result">
+ id | estimated_class
+&#160;----+-----------------
+  1 | Don't Play
+  2 | Don't Play
+  3 | Play
+  4 | Play
+  5 | Play
+  6 | Don't Play
+  7 | Play
+  8 | Don't Play
+  9 | Play
+ 10 | Play
+ 11 | Play
+ 12 | Play
+ 13 | Play
+ 14 | Don't Play
+(14 rows)
+</pre></li>
+<li>Obtain a dot format display of the tree <pre class="example">
+SELECT madlib.tree_display('train_output');
+</pre> Result: <pre class="result">
+digraph "Classification tree for dt_golf" {
+         subgraph "cluster0"{
+         label=""
+"g0_0" [label="\"OUTLOOK"&lt;={overcast}", shape=ellipse];
+"g0_0" -&gt; "g0_1"[label="yes"];
+"g0_1" [label=""Play"",shape=box];
+"g0_0" -&gt; "g0_2"[label="no"];
+"g0_2" [label="temperature&lt;=75", shape=ellipse];
+"g0_2" -&gt; "g0_5"[label="yes"];
+"g0_2" -&gt; "g0_6"[label="no"];
+"g0_6" [label=""Don't Play"",shape=box];
+"g0_5" [label="temperature&lt;=65", shape=ellipse];
+"g0_5" -&gt; "g0_11"[label="yes"];
+"g0_11" [label=""Don't Play"",shape=box];
+"g0_5" -&gt; "g0_12"[label="no"];
+"g0_12" [label="temperature&lt;=70", shape=ellipse];
+"g0_12" -&gt; "g0_25"[label="yes"];
+"g0_25" [label=""Play"",shape=box];
+"g0_12" -&gt; "g0_26"[label="no"];
+"g0_26" [label="temperature&lt;=72", shape=ellipse];
+"g0_26" -&gt; "g0_53"[label="yes"];
+"g0_53" [label=""Don't Play"",shape=box];
+"g0_26" -&gt; "g0_54"[label="no"];
+"g0_54" [label=""Play"",shape=box];
+&#160;&#160;&#160;} //--- end of subgraph------------
+&#160;} //---end of digraph---------
+</pre></li>
+<li><p class="startli">Obtain a text display of the tree </p><pre class="example">
+SELECT madlib.tree_display('train_output', FALSE);
+</pre><p> Result: </p><pre class="result">
+&#160;-------------------------------------
+&#160;- Each node represented by 'id' inside ().
+&#160;- Leaf nodes have a * while internal nodes have the split condition at the end.
+&#160;- For each internal node (i), it's children will be at (2i+1) and (2i+2).
+&#160;- For each split the first indented child (2i+1) is the 'True' node and
+second indented child (2i+2) is the 'False' node.
+&#160;- Number of (weighted) rows for each response variable inside [].
+&#160;- Order of values = ['"Don\'t Play"', '"Play"']
+&#160;-------------------------------------
+(0)[ 5 9]  "OUTLOOK"&lt;={overcast}
+  (1)[ 0 4]  *
+  (2)[ 5 5]  temperature&lt;=75
+    (5)[ 3 5]  temperature&lt;=65
+      (11)[ 1 0]  *
+      (12)[ 2 5]  temperature&lt;=70
+        (25)[ 0 3]  *
+        (26)[ 2 2]  temperature&lt;=72
+          (53)[ 2 0]  *
+          (54)[ 0 2]  *
+    (6)[ 2 0]  *
+&#160;-------------------------------------
+</pre><p class="startli">Decision tree regression example</p>
+</li>
+<li>Prepare input data. <pre class="example">
+CREATE TABLE mt_cars (
+    id integer NOT NULL,
+    mpg double precision,
+    cyl integer,
+    disp double precision,
+    hp integer,
+    drat double precision,
+    wt double precision,
+    qsec double precision,
+    vs integer,
+    am integer,
+    gear integer,
+    carb integer
+) ;
+</pre> <pre class="example">
+COPY mt_cars (id,mpg,cyl,disp,hp,drat,wt,qsec,vs,am,gear,carb) FROM stdin WITH DELIMITER '|' NULL '\null';
+1|18.7|8|360|175|3.15|3.44|17.02|0|0|3|2
+2|21|6|160|110|3.9|2.62|16.46|0|1|4|4
+3|24.4|4|146.7|62|3.69|3.19|20|1|0|4|2
+4|21|6|160|110|3.9|2.875|17.02|0|1|4|4
+5|17.8|6|167.6|123|3.92|3.44|18.9|1|0|4|4
+6|16.4|8|275.8|180|3.078|4.07|17.4|0|0|3|3
+7|22.8|4|108|93|3.85|2.32|18.61|1|1|4|1
+8|17.3|8|275.8|180|3.078|3.73|17.6|0|0|3|3
+9|21.4|\null|258|110|3.08|3.215|19.44|1|0|3|1
+10|15.2|8|275.8|180|3.078|3.78|18|0|0|3|3
+11|18.1|6|225|105|2.768|3.46|20.22|1|0|3|1
+12|32.4|4|78.7|66|4.08|2.20|19.47|1|1|4|1
+13|14.3|8|360|245|3.21|3.578|15.84|0|0|3|4
+14|22.8|4|140.8|95|3.92|3.15|22.9|1|0|4|2
+15|30.4|4|75.7|52|4.93|1.615|18.52|1|1|4|2
+16|19.2|6|167.6|123|3.92|3.44|18.3|1|0|4|4
+17|33.9|4|71.14|65|4.22|1.835|19.9|1|1|4|1
+18|15.2|\null|304|150|3.15|3.435|17.3|0|0|3|2
+19|10.4|8|472|205|2.93|5.25|17.98|0|0|3|4
+20|27.3|4|79|66|4.08|1.935|18.9|1|1|4|1
+21|10.4|8|460|215|3|5.424|17.82|0|0|3|4
+22|26|4|120.3|91|4.43|2.14|16.7|0|1|5|2
+23|14.7|8|440|230|3.23|5.345|17.42|0|0|3|4
+24|30.4|4|95.14|113|3.77|1.513|16.9|1|1|5|2
+25|21.5|4|120.1|97|3.70|2.465|20.01|1|0|3|1
+26|15.8|8|351|264|4.22|3.17|14.5|0|1|5|4
+27|15.5|8|318|150|2.768|3.52|16.87|0|0|3|2
+28|15|8|301|335|3.54|3.578|14.6|0|1|5|8
+29|13.3|8|350|245|3.73|3.84|15.41|0|0|3|4
+30|19.2|8|400|175|3.08|3.845|17.05|0|0|3|2
+31|19.7|6|145|175|3.62|2.77|15.5|0|1|5|6
+32|21.4|4|121|109|4.11|2.78|18.6|1|1|4|2
+\.
+</pre></li>
+<li>Run Decision Tree train function. <pre class="example">
+DROP TABLE IF EXISTS train_output, train_output_summary;
+SELECT madlib.tree_train('mt_cars',
+                         'train_output',
+                         'id',
+                         'mpg',
+                         '*',
+                         'id, hp, drat, am, gear, carb',  -- exclude columns
+                         'mse',
+                         NULL::text,
+                         NULL::text,
+                         10,
+                         8,
+                         3,
+                         10,
+                         NULL,
+                         'max_surrogates=2'
+                         );
+</pre></li>
+<li>Display the decision tree in basic text format. <pre class="example">
+SELECT madlib.tree_display('train_output', FALSE);
+</pre> Result: <pre class="result">
+&#160; -------------------------------------
+&#160;- Each node represented by 'id' inside ().
+&#160;- Each internal nodes has the split condition at the end, while each
+&#160;    leaf node has a * at the end.
+&#160;- For each internal node (i), its child nodes are indented by 1 level
+&#160;    with ids (2i+1) for True node and (2i+2) for False node.
+&#160;- Number of rows and average response value inside []. For a leaf node, this is the prediction.
+&#160;-------------------------------------
+ (0)[32, 20.0906]  cyl in {8,6}
+    (1)[21, 16.6476]  disp &lt;= 258
+       (3)[7, 19.7429]  *
+       (4)[14, 15.1]  qsec &lt;= 17.42
+          (9)[10, 15.81]  qsec &lt;= 16.9
+             (19)[5, 14.78]  *
+             (20)[5, 16.84]  *
+          (10)[4, 13.325]  *
+    (2)[11, 26.6636]  wt &lt;= 2.2
+       (5)[6, 30.0667]  *
+       (6)[5, 22.58]  *
+ &#160;-------------------------------------
+(1 row)
+</pre></li>
+<li>Display the surrogates in the decision tree. <pre class="example">
+SELECT madlib.tree_surr_display('train_output');
+</pre> Result: <pre class="result">
+&#160;-------------------------------------
+       Surrogates for internal nodes
+&#160;-------------------------------------
+ (0) cyl in {8,6}
+      1: disp &gt; 146.7    [common rows = 29]
+      2: vs in {0}    [common rows = 26]
+      [Majority branch = 19 ]
+ (1) disp &lt;= 258
+      1: cyl in {6,4}    [common rows = 19]
+      2: vs in {1}    [common rows = 18]
+      [Majority branch = 14 ]
+ (2) wt &lt;= 2.2
+      1: disp &lt;= 108    [common rows = 9]
+      2: qsec &lt;= 18.52    [common rows = 8]
+      [Majority branch = 6 ]
+ (4) qsec &lt;= 17.42
+      1: disp &gt; 275.8    [common rows = 11]
+      2: vs in {0}    [common rows = 10]
+      [Majority branch = 10 ]
+ (9) qsec &lt;= 16.9
+      1: wt &lt;= 3.84    [common rows = 8]
+      2: disp &lt;= 360    [common rows = 7]
+      [Majority branch = 5 ]
+&#160;-------------------------------------
+(1 row)
+</pre></li>
+</ol>
+<dl class="section note"><dt>Note</dt><dd>The 'cyl' parameter above has two tuples with null values. In the prediction example below, the surrogate splits for the <em>cyl in {8, 6}</em> split are used to predict those two tuples (<em>id = 9</em> and <em>id = 18</em>). The splits are used in descending order till a surrogate variable is found that is not NULL. In this case, the two tuples have non-NULL values for <em>disp</em>, hence the <em>disp &gt; 146.7</em> split is used to make the prediction. If all the surrogate variables had been NULL then the majority branch would have been followed.</dd></dl>
+<ol type="1">
+<li>Predict regression output for the same data and compare with original. <pre class="example">
+DROP TABLE IF EXISTS prediction_results;
+SELECT madlib.tree_predict('train_output',
+                           'mt_cars',
+                           'prediction_results',
+                           'response');
+SELECT s.id, mpg, estimated_mpg FROM prediction_results p, mt_cars s where s.id = p.id;
+</pre> Result: <pre class="result">
+  id | mpg  |  estimated_mpg
+----+------+------------------
+  1 | 18.7 |            16.84
+  2 |   21 | 19.7428571428571
+  3 | 24.4 |            22.58
+  4 |   21 | 19.7428571428571
+  5 | 17.8 | 19.7428571428571
+  6 | 16.4 |            16.84
+  7 | 22.8 |            22.58
+  8 | 17.3 |           13.325
+  9 | 21.4 | 19.7428571428571
+ 10 | 15.2 |           13.325
+ 11 | 18.1 | 19.7428571428571
+ 12 | 32.4 | 30.0666666666667
+ 13 | 14.3 |            14.78
+ 14 | 22.8 |            22.58
+ 15 | 30.4 | 30.0666666666667
+ 16 | 19.2 | 19.7428571428571
+ 17 | 33.9 | 30.0666666666667
+ 18 | 15.2 |            16.84
+ 19 | 10.4 |           13.325
+ 20 | 27.3 | 30.0666666666667
+ 21 | 10.4 |           13.325
+ 22 |   26 | 30.0666666666667
+ 23 | 14.7 |            16.84
+ 24 | 30.4 | 30.0666666666667
+ 25 | 21.5 |            22.58
+ 26 | 15.8 |            14.78
+ 27 | 15.5 |            14.78
+ 28 |   15 |            14.78
+ 29 | 13.3 |            14.78
+ 30 | 19.2 |            16.84
+ 31 | 19.7 | 19.7428571428571
+ 32 | 21.4 |            22.58
+(32 rows)
+</pre></li>
+</ol>
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd>[1] Breiman, Leo; Friedman, J. H.; Olshen, R. A.; Stone, C. J. (1984). Classification and regression trees. Monterey, CA: Wadsworth &amp; Brooks/Cole Advanced Books &amp; Software.</dd></dl>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="decision__tree_8sql__in.html">decision_tree.sql_in</a> documenting the training function</p>
+<p><a class="el" href="group__grp__random__forest.html">Random Forest</a></p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__dectree.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__dectree.html b/docs/latest/group__grp__dectree.html
new file mode 100644
index 0000000..dd82473
--- /dev/null
+++ b/docs/latest/group__grp__dectree.html
@@ -0,0 +1,420 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Decision Tree (old C4.5 implementation)</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__dectree.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Decision Tree (old C4.5 implementation)<div class="ingroups"><a class="el" href="group__grp__deprecated.html">Deprecated Modules</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b><ul>
+<li>
+<a href="#input">Input</a> </li>
+<li>
+<a href="#train">Training Function</a> </li>
+<li>
+<a href="#classify">Classification Function</a> </li>
+<li>
+<a href="#score">Scoring Function</a> </li>
+<li>
+<a href="#display">Display Tree Function</a> </li>
+<li>
+<a href="#notes">Implementation Notes</a> </li>
+<li>
+<a href="#examples">Examples</a> </li>
+<li>
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><dl class="section warning"><dt>Warning</dt><dd><em> This is an old implementation of decision trees. For a newer implementation, please see <a class="el" href="group__grp__decision__tree.html">Decision Tree</a></em></dd></dl>
+<p>This module provides an implementation of the C4.5 algorithm to grow decision trees.</p>
+<p>The implementation supports:</p><ul>
+<li>Building the decision tree</li>
+<li>Multiple split criteria, including: &ndash; Information Gain &ndash; Gini Coefficient &ndash; Gain Ratio</li>
+<li>Decision tree Pruning</li>
+<li>Decision tree classification/scoring</li>
+<li>Decision tree display</li>
+<li>Rule generation</li>
+<li>Continuous and discrete features</li>
+<li>Missing value handling</li>
+</ul>
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd></dd></dl>
+<p>Run the training algorithm on the source data: </p><pre class="syntax">
+c45_train( split_criterion,
+           training_table_name,
+           result_tree_table_name,
+           validation_table_name,
+           continuous_feature_names,
+           feature_col_names,
+           id_col_name,
+           class_col_name,
+           confidence_level,
+           how2handle_missing_value,
+           max_tree_depth,
+           node_prune_threshold,
+           node_split_threshold,
+           verbosity
+         )
+</pre><p> <b>Arguments</b> </p><dl class="arglist">
+<dt>split_criterion </dt>
+<dd>The name of the split criterion that should be used for tree construction. The valid values are ‘infogain’, ‘gainratio’, and ‘gini’. It can't be NULL. Information gain(infogain) and gini index(gini) are biased toward multivalued attributes. Gain ratio(gainratio) adjusts for this bias. However, it tends to prefer unbalanced splits in which one partition is much smaller than the others.  </dd>
+<dt>training_table_name </dt>
+<dd>The name of the table/view with the source data. The <b>training data</b> is expected to be of the following form: <pre>{TABLE|VIEW} <em>trainingSource</em> (
+    ...
+    <em>id</em> INT|BIGINT,
+    <em>feature1</em> SUPPORTED_DATA_TYPE,
+    <em>feature2</em> SUPPORTED_DATA_TYPE,
+    <em>feature3</em> SUPPORTED_DATA_TYPE,
+    ....................
+    <em>featureN</em> SUPPORTED_DATA_TYPE,
+    <em>class</em>    SUPPORTED_DATA_TYPE,
+    ...
+)</pre> The detailed list of SUPPORTED_DATA_TYPE is: SMALLINT, INT, BIGINT, FLOAT8, REAL, DECIMAL, INET, CIDR, MACADDR, BOOLEAN, CHAR, VARCHAR, TEXT, "char", DATE, TIME, TIMETZ, TIMESTAMP, TIMESTAMPTZ, and INTERVAL.  </dd>
+<dt>result_tree_table_name </dt>
+<dd>The name of the table to contain the decision tree output. The table stores an abstract object (representing the model) used for further classification. It has the following columns: <table  class="output">
+<tr>
+<th>id  </th></tr>
+<tr>
+<th>tree_location  </th></tr>
+<tr>
+<th>feature  </th></tr>
+<tr>
+<th>probability  </th></tr>
+<tr>
+<th>ebp_coeff  </th></tr>
+<tr>
+<th>maxclass  </th></tr>
+<tr>
+<th>scv  </th></tr>
+<tr>
+<th>live  </th></tr>
+<tr>
+<th>sample_size  </th></tr>
+<tr>
+<th>parent_id  </th></tr>
+<tr>
+<th>lmc_nid  </th></tr>
+<tr>
+<th>lmc_fval  </th></tr>
+<tr>
+<th>is_continuous  </th></tr>
+<tr>
+<th>split_value  </th></tr>
+<tr>
+<th>tid  </th></tr>
+<tr>
+<th>dp_ids  </th></tr>
+</table>
+</dd>
+<dt>validation_table_name </dt>
+<dd>The name of the table/view that contains the validation set used for tree pruning. The default is NULL, in which case we will not do tree pruning.  </dd>
+<dt>continuous_feature_names </dt>
+<dd>A comma-separated list of the names of features whose values are continuous. The default is null, which means there are no continuous features in the training table.  </dd>
+<dt>feature_col_names </dt>
+<dd>A comma-separated list of the names of table columns, each of which defines a feature. The default value is null, which means all the columns in the training table, except columns named ‘id’ and ‘class’, will be used as features.  </dd>
+<dt>id_col_name </dt>
+<dd>The name of the column containing an ID for each record. </dd>
+<dt>class_col_name </dt>
+<dd>The name of the column containing the labeled class.  </dd>
+<dt>confidence_level </dt>
+<dd>A statistical confidence interval of the resubstitution error. </dd>
+<dt>how2handle_missing_value </dt>
+<dd>The way to handle missing values. The valid values are 'explicit' and 'ignore'. </dd>
+<dt>max_tree_depth </dt>
+<dd>Specifies the maximum number of levels in the result DT to avoid overgrown DTs.  </dd>
+<dt>node_prune_threshold </dt>
+<dd>The minimum percentage of the number of records required in a child node. It can't be NULL. Its value must be in the range [0.0, 1.0]. This threshold only applies to the non-root nodes. Therefore, if its value is 1, then the trained tree only has one node (the root node); if its value is 0, then no nodes will be pruned by this parameter. </dd>
+<dt>node_split_threshold </dt>
+<dd>The minimum percentage of the number of records required in a node in order for a further split to be possible. It can't be NULL. Its value must be in the range [0.0, 1.0]. If its value is 1, then the trained tree only has two levels, since only the root node can grow; if its value is 0, then trees can grow extensively. </dd>
+<dt>verbosity </dt>
+<dd>An integer greater than 0 means this function runs in verbose mode. </dd>
+</dl>
+<p><a class="anchor" id="classify"></a></p><dl class="section user"><dt>Classification Function</dt><dd></dd></dl>
+<p>The classification function uses the learned model stored by the training function to create the classification results. </p><pre class="syntax">
+c45_classify( tree_table_name,
+              classification_table_name,
+              result_table_name
+            )
+</pre><p> <b>Arguments</b> </p><dl class="arglist">
+<dt>tree_table_name </dt>
+<dd><p class="startdd">The name of the table containing the trained model.</p>
+<p class="enddd">The data to classify is expected to be in the same form as the training data, except that it does not need a class column.  </p>
+</dd>
+<dt>classification_table_name </dt>
+<dd>The name of the table containing the data to classify. </dd>
+<dt>result_table_name </dt>
+<dd>The name of the output table. </dd>
+</dl>
+<p><a class="anchor" id="score"></a></p><dl class="section user"><dt>Scoring Function</dt><dd>The scoring function scores the learned model against a validation data set. <pre class="syntax">
+c45_score( tree_table_name,
+           validation_table_name,
+           verbosity
+         );
+</pre></dd></dl>
+<p>This gives a ratio of correctly classified items in the validation set.</p>
+<p><a class="anchor" id="display"></a></p><dl class="section user"><dt>Display Tree Function</dt><dd></dd></dl>
+<p>The display tree function displays the learned model in a human-readable format.</p>
+<pre class="syntax">
+c45_display( tree_table_name
+           );
+</pre><p><a class="anchor" id="clean"></a></p><dl class="section user"><dt>Clean Tree Function</dt><dd></dd></dl>
+<p>The clean tree function cleans up the learned model and all metadata. </p><pre class="syntax">
+  c45_clean( tree_table_name
+           );
+</pre><p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Implementation Notes</dt><dd></dd></dl>
+<p>Due to some implementation differences, decision tree on HAWQ is much slower than on Greenplum database when running on small data sets. However, for larger data sets, the performance difference is much smaller. For example, in a test with 0.75 million rows of data, decision tree on HAWQ is only about twice as slow as on GPDB. This is because the overhead due to the different implementation is proportional to the tree size, and is usually negligible as data size increases (The tree size is not likely to increase proportionally with the data size. For example, if a 10-node tree is used to fit a data set with 1000 rows, it is very unlikely to fit another data set with 1 million rows with a 10000-node tree).</p>
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>Prepare an input table. <pre class="example">
+SELECT * FROM golf_data ORDER BY id;
+</pre> Result: <pre class="result">
+ id | outlook  | temperature | humidity | windy  |    class
+&#160;---+----------+-------------+----------+--------+--------------
+  1 | sunny    |          85 |       85 |  false |  Do not Play
+  2 | sunny    |          80 |       90 |  true  |  Do not Play
+  3 | overcast |          83 |       78 |  false |  Play
+  4 | rain     |          70 |       96 |  false |  Play
+  5 | rain     |          68 |       80 |  false |  Play
+  6 | rain     |          65 |       70 |  true  |  Do not Play
+  7 | overcast |          64 |       65 |  true  |  Play
+  8 | sunny    |          72 |       95 |  false |  Do not Play
+  9 | sunny    |          69 |       70 |  false |  Play
+ 10 | rain     |          75 |       80 |  false |  Play
+ 11 | sunny    |          75 |       70 |  true  |  Play
+ 12 | overcast |          72 |       90 |  true  |  Play
+ 13 | overcast |          81 |       75 |  false |  Play
+ 14 | rain     |          71 |       80 |  true  |  Do not Play
+(14 rows)
+</pre></li>
+<li>Train the decision tree model. Run the <a class="el" href="c45_8sql__in.html#ac25e17ecbc70149aa559018e718fc793" title="Cleanup the trained tree table and any relevant tables. ">c45_clean()</a> function first to clean up any model and metadata from previous executions. <pre class="example">
+SELECT * FROM madlib.c45_clean( 'trained_tree_infogain'
+                              );
+SELECT * FROM madlib.c45_train( 'infogain',
+                                'golf_data',
+                                'trained_tree_infogain',
+                                null,
+                                'temperature,humidity',
+                                'outlook,temperature,humidity,windy',
+                                'id',
+                                'class',
+                                100,
+                                'explicit',
+                                5,
+                                0.001,
+                                0.001,
+                                0
+                              );
+</pre> Result: <pre class="result">
+ training_set_size | tree_nodes | tree_depth |  training_time  | split_criterion
+&#160;------------------+------------+------------+-----------------+-----------------
+                14 |          8 |          3 | 00:00:00.871805 | infogain
+(1 row)
+</pre></li>
+<li>View the tree model table. <pre class="example">
+SELECT * FROM trained_tree_infogain ORDER BY id;
+</pre> Result: <pre class="result">
+ id | tree_location | feature |    probability    | ebp_coeff | maxclass |       scv         | live |sample_size | parent_id | lmc_nid | lmc_fval | is_continuous   | split_value
+&#160;---+---------------+---------+-------------------+-----------+----------+-------------------+------+----------+-----------+---------+----------+-----------------+-------------
+  1 | {0}           |       3 | 0.642857142857143 |         1 |        2 | 0.171033941880327 |    0 |       14 |         0 |       2 |        1 | f               |
+  2 | {0,1}         |       4 |                 1 |         1 |        2 |                 0 |    0 |        4 |         1 |         |          | f               |
+  3 | {0,2}         |       4 |               0.6 |         1 |        2 | 0.673011667009257 |    0 |        5 |         1 |       5 |        1 | f               |
+  4 | {0,3}         |       2 |               0.6 |         1 |        1 | 0.673011667009257 |    0 |        5 |         1 |       7 |        1 | t               |          70
+  5 | {0,2,1}       |       4 |                 1 |         1 |        2 |                 0 |    0 |        3 |         3 |         |          | f               |
+  6 | {0,2,2}       |       4 |                 1 |         1 |        1 |                 0 |    0 |        2 |         3 |         |          | f               |
+  7 | {0,3,1}       |       4 |                 1 |         1 |        2 |                 0 |    0 |        2 |         4 |         |          | f               |
+  8 | {0,3,2}       |       4 |                 1 |         1 |        1 |                 0 |    0 |        3 |         4 |         |          | f               |
+(8 rows)
+</pre></li>
+<li>Display the tree with a human readable format: <pre class="example">
+SELECT madlib.c45_display('trained_tree_infogain');
+</pre> Result: <pre class="result">
+                                      c45_display
+&#160;--------------------------------------------------------------------------------------
+Tree 1
+    Root Node  : class(  Play)   num_elements(14)  predict_prob(0.642857142857143)
+         outlook:  = overcast : class( Play)   num_elements(4)  predict_prob(1)
+         outlook:  = rain : class( Play)   num_elements(5)  predict_prob(0.6)
+             windy:  =  false : class( Play)   num_elements(3)  predict_prob(1)
+             windy:  = true  : class(  Do not Play)   num_elements(2)  predict_prob(1)
+         outlook:  =  sunny      : class(  Do not Play)   num_elements(5)  predict_prob(0.6)
+             humidity:  &lt;= 70 : class( Play)   num_elements(2)  predict_prob(1)
+             humidity:  &gt; 70  : class(  Do not Play)   num_elements(3)  predict_prob(1)
+(1 row)
+</pre></li>
+<li>Classify some data with the learned model. <pre class="example">
+SELECT * FROM madlib.c45_classify ( 'trained_tree_infogain',
+                                    'golf_data',
+                                    'classification_result'
+                                  );
+</pre> Result: <pre class="result">
+ input_set_size |    classification_time
+----------------+-----------------
+             14 | 00:00:00.247713
+(1 row)
+</pre></li>
+<li>Check the classification results. <pre class="example">
+SELECT t.id, t.outlook, t.temperature, t.humidity, t.windy, c.class
+FROM   madlib.classification_result c, golf_data t
+WHERE  t.id=c.id ORDER BY id;
+</pre> Result: <pre class="result">
+ id | outlook  | temperature | humidity | windy  |    class
+&#160;---+----------+-------------+----------+--------+--------------
+  1 | sunny    |          85 |       85 |  false |  Do not Play
+  2 | sunny    |          80 |       90 |  true  |  Do not Play
+  3 | overcast |          83 |       78 |  false |  Play
+  4 | rain     |          70 |       96 |  false |  Play
+  5 | rain     |          68 |       80 |  false |  Play
+  6 | rain     |          65 |       70 |  true  |  Do not Play
+  7 | overcast |          64 |       65 |  true  |  Play
+  8 | sunny    |          72 |       95 |  false |  Do not Play
+  9 | sunny    |          69 |       70 |  false |  Play
+ 10 | rain     |          75 |       80 |  false |  Play
+ 11 | sunny    |          75 |       70 |  true  |  Play
+ 12 | overcast |          72 |       90 |  true  |  Play
+ 13 | overcast |          81 |       75 |  false |  Play
+ 14 | rain     |          71 |       80 |  true  |  Do not Play
+(14 rows)
+</pre></li>
+<li>Score the data against a validation set. <pre class="example">
+SELECT * FROM madlib.c45_score( 'trained_tree_infogain',
+                                'golf_data_validation',
+                                0
+                              );
+</pre> Result: <pre class="result">
+ c45_score
+&#160;----------
+      1
+(1 row)
+</pre></li>
+<li>Clean up the tree and metadata. <pre class="example">
+SELECT madlib.c45_clean( 'trained_tree_infogain'
+                       );
+</pre> Result: <pre class="result">
+ c45_clean
+&#160;----------
+&#160;
+(1 row)
+</pre></li>
+</ol>
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] <a href="http://en.wikipedia.org/wiki/C4.5_algorithm">http://en.wikipedia.org/wiki/C4.5_algorithm</a></p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd>File <a class="el" href="c45_8sql__in.html" title="C4.5 APIs and main controller written in PL/PGSQL. ">c45.sql_in</a> documenting the SQL functions. </dd></dl>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__dense__linear__solver.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__dense__linear__solver.html b/docs/latest/group__grp__dense__linear__solver.html
new file mode 100644
index 0000000..8a6a31b
--- /dev/null
+++ b/docs/latest/group__grp__dense__linear__solver.html
@@ -0,0 +1,263 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Dense Linear Systems</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__dense__linear__solver.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Dense Linear Systems<div class="ingroups"><a class="el" href="group__grp__utility__functions.html">Utility Functions</a> &raquo; <a class="el" href="group__grp__linear__solver.html">Linear Solvers</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> <ul>
+<li class="level1">
+<a href="#dls_usage">Solution Function</a> </li>
+<li class="level1">
+<a href="#dls_opt_params">Optimizer Parameters</a> </li>
+<li class="level1">
+<a href="#dls_examples">Examples</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>The linear systems module implements solution methods for systems of consistent linear equations. Systems of linear equations take the form: </p><p class="formulaDsp">
+\[ Ax = b \]
+</p>
+<p>where \(x \in \mathbb{R}^{n}\), \(A \in \mathbb{R}^{m \times n} \) and \(b \in \mathbb{R}^{m}\). We assume that there are no rows of \(A\) where all elements are zero. The algorithms implemented in this module can handle large dense linear systems. Currently, the algorithms implemented in this module solve the linear system by a direct decomposition. Hence, these methods are known as <em>direct methods</em>.</p>
+<p><a class="anchor" id="dls_usage"></a></p><dl class="section user"><dt>Solution Function</dt><dd><pre class="syntax">
+linear_solver_dense( tbl_source,
+                     tbl_result,
+                     row_id,
+                     LHS,
+                     RHS,
+                     grouping_col,
+                     optimizer,
+                     optimizer_params
+                   )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>tbl_source </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the training data. The input data is expected to be of the following form: </p><pre>{TABLE|VIEW} <em>sourceName</em> (
+    ...
+    <em>row_id</em>          FLOAT8,
+    <em>left_hand_side</em>  FLOAT8[],
+    <em>right_hand_side</em> FLOAT8,
+    ...
+)</pre><p>Each row represents a single equation. The <em>right_hand_side</em> column refers to the right hand side of the equations while the <em>left_hand_side</em> column refers to the multipliers on the variables on the left hand side of the same equations.</p>
+<p class="enddd"></p>
+</dd>
+<dt>tbl_result </dt>
+<dd><p class="startdd">TEXT. The name of the table where the output is saved. The output is stored in the table named by the <em>tbl_result</em> argument. It contains the following columns: </p><table  class="output">
+<tr>
+<th>solution </th><td>FLOAT8[]. The solution variables in the same order as that provided as input in the 'left_hand_side' column name of the <em>source_table</em>  </td></tr>
+<tr>
+<th>residual_norm </th><td>FLOAT8. The scaled residual norm, defined as \( \frac{|Ax - b|}{|b|} \). This value is an indication of the accuracy of the solution.  </td></tr>
+<tr>
+<th>iters </th><td>INTEGER. Number of iterations required by the algorithm (only applicable for iterative algorithms). The output is NULL for 'direct' methods.   </td></tr>
+</table>
+<p class="enddd"></p>
+</dd>
+<dt>row_id </dt>
+<dd><p class="startdd">TEXT. The name of the column storing the 'row id' of the equations.</p>
+<p>For a system with N equations, the row_id's must be a continuous range of integers from \( 0 \ldots n-1 \). </p>
+<p class="enddd"></p>
+</dd>
+<dt>LHS </dt>
+<dd><p class="startdd">TEXT. The name of the column storing the 'left hand side' of the equations, stored as an array.</p>
+<p class="enddd"></p>
+</dd>
+<dt>RHS </dt>
+<dd><p class="startdd">TEXT. The name of the column storing the 'right hand side' of the equations.</p>
+<p class="enddd"></p>
+</dd>
+<dt>grouping_col (optional)  </dt>
+<dd>TEXT, default: NULL. Group by column names. <em>Not currently implemented. Any non-NULL value is ignored.</em> </dd>
+<dt>optimizer (optional)  </dt>
+<dd><p class="startdd">TEXT, default: 'direct'. The type of optimizer.</p>
+<p class="enddd"></p>
+</dd>
+<dt>optimizer_params (optional)  </dt>
+<dd>TEXT, default: NULL. Optimizer specific parameters. </dd>
+</dl>
+</dd></dl>
+<p><a class="anchor" id="dls_opt_params"></a></p><dl class="section user"><dt>Optimizer Parameters</dt><dd></dd></dl>
+<p>For each optimizer, there are specific parameters that can be tuned for better performance.</p>
+<dl class="arglist">
+<dt>algorithm (default: householderqr) </dt>
+<dd><p class="startdd">There are several algorithms that can be classified as 'direct' methods of solving linear systems. MADlib dense linear system solvers provide various algorithmic options for users.</p>
+<p>The following table provides a guideline on the choice of algorithm based on conditions on the A matrix, speed of the algorithms and numerical stability. </p><pre class="fragment"> Algorithm            | Conditions on A  | Speed | Accuracy
+ ----------------------------------------------------------
+ householderqr        | None             |  ++   |  +
+ partialpivlu         | Invertible       |  ++   |  +
+ fullpivlu            | None             |  -    |  +++
+ colpivhouseholderqr  | None             |  +    |  ++
+ fullpivhouseholderqr | None             |  -    |  +++
+ llt                  | Pos. Definite    |  +++  |  +
+ ldlt                 | Pos. or Neg Def  |  +++  |  ++
+</pre><p>For speed '++' is faster than '+', which is faster than '-'. For accuracy '+++' is better than '++'.</p>
+<p class="enddd">More details about the individual algorithms can be found in the <a href="http://eigen.tuxfamily.org/dox-devel/group__TutorialLinearAlgebra.html">Eigen documentation</a>. Eigen is an open source library for linear algebra.  </p>
+</dd>
+</dl>
+<p><a class="anchor" id="dls_examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>View online help for the linear systems solver function. <pre class="example">
+SELECT madlib.linear_solver_dense();
+</pre></li>
+<li>Create the sample data set. <pre class="example">
+CREATE TABLE linear_systems_test_data( id INTEGER NOT NULL,
+                                       lhs DOUBLE PRECISION[],
+                                       rhs DOUBLE PRECISION
+                                     );
+INSERT INTO linear_systems_test_data(id, lhs, rhs)
+       VALUES
+        (0, ARRAY[1,0,0], 20),
+        (1, ARRAY[0,1,0], 15),
+        (2, ARRAY[0,0,1], 20);
+</pre></li>
+<li>Solve the linear systems with default parameters. <pre class="example">
+SELECT madlib.linear_solver_dense( 'linear_systems_test_data',
+                                   'output_table',
+                                   'id',
+                                   'lhs',
+                                   'rhs'
+                                 );
+</pre></li>
+<li>Obtain the output from the output table. <pre class="example">
+\x on
+SELECT * FROM output_table;
+</pre> Result: <pre class="result">
+--------------------+-------------------------------------
+solution            | {20,15,20}
+residual_norm       | 0
+iters               | NULL
+</pre></li>
+<li>Choose an algorithm different than the default. <pre class="example">
+DROP TABLE IF EXISTS result_table;
+SELECT madlib.linear_solver_dense( 'linear_systems_test_data',
+                                   'result_table',
+                                   'id',
+                                   'lhs',
+                                   'rhs',
+                                   NULL,
+                                   'direct',
+                                   'algorithm=llt'
+                                 );
+</pre></li>
+</ol>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd>File <a class="el" href="dense__linear__systems_8sql__in.html" title="SQL functions for linear systems. ">dense_linear_systems.sql_in</a> documenting the SQL functions</dd></dl>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__deprecated.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__deprecated.html b/docs/latest/group__grp__deprecated.html
new file mode 100644
index 0000000..af92407
--- /dev/null
+++ b/docs/latest/group__grp__deprecated.html
@@ -0,0 +1,155 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Deprecated Modules</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__deprecated.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="summary">
+<a href="#groups">Modules</a>  </div>
+  <div class="headertitle">
+<div class="title">Deprecated Modules</div>  </div>
+</div><!--header-->
+<div class="contents">
+<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
+<p>A collection of deprecated modules. </p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="groups"></a>
+Modules</h2></td></tr>
+<tr class="memitem:group__grp__dectree"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__dectree.html">Decision Tree (old C4.5 implementation)</a></td></tr>
+<tr class="memdesc:group__grp__dectree"><td class="mdescLeft">&#160;</td><td class="mdescRight">Generates a decision tree using the C4.5 algorithm. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__svdmf"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__svdmf.html">Matrix Factorization</a></td></tr>
+<tr class="memdesc:group__grp__svdmf"><td class="mdescLeft">&#160;</td><td class="mdescRight">Computes low-rank approximation of a sparse matrix. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__mlogreg"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__mlogreg.html">Multinomial Logistic Regression</a></td></tr>
+<tr class="memdesc:group__grp__mlogreg"><td class="mdescLeft">&#160;</td><td class="mdescRight">Also called as softmax regression, models the relationship between one or more independent variables and a categorical dependent variable. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__profile"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__profile.html">Profile</a></td></tr>
+<tr class="memdesc:group__grp__profile"><td class="mdescLeft">&#160;</td><td class="mdescRight">Produces a "profile" of a table or view by running a predefined set of aggregates on each column. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__quantile"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__quantile.html">Quantile</a></td></tr>
+<tr class="memdesc:group__grp__quantile"><td class="mdescLeft">&#160;</td><td class="mdescRight">Computes a quantile value for a column in a table. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__rf"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__rf.html">Random Forest (old implementation)</a></td></tr>
+<tr class="memdesc:group__grp__rf"><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructs a classification model that outputs the class most frequently chosen by many decision trees constructed from a training dataset. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+</table>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__deprecated.js
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__deprecated.js b/docs/latest/group__grp__deprecated.js
new file mode 100644
index 0000000..dfdf4a1
--- /dev/null
+++ b/docs/latest/group__grp__deprecated.js
@@ -0,0 +1,9 @@
+var group__grp__deprecated =
+[
+    [ "Decision Tree (old C4.5 implementation)", "group__grp__dectree.html", null ],
+    [ "Matrix Factorization", "group__grp__svdmf.html", null ],
+    [ "Multinomial Logistic Regression", "group__grp__mlogreg.html", null ],
+    [ "Profile", "group__grp__profile.html", null ],
+    [ "Quantile", "group__grp__quantile.html", null ],
+    [ "Random Forest (old implementation)", "group__grp__rf.html", null ]
+];
\ No newline at end of file