Posted to commits@madlib.apache.org by ri...@apache.org on 2016/03/30 02:58:52 UTC

[12/51] [partial] incubator-madlib-site git commit: Add all files from old site (madlib.net)

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__glm.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__glm.html b/docs/latest/group__grp__glm.html
new file mode 100644
index 0000000..a6412a7
--- /dev/null
+++ b/docs/latest/group__grp__glm.html
@@ -0,0 +1,579 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Generalized Linear Models</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__glm.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Generalized Linear Models<div class="ingroups"><a class="el" href="group__grp__super.html">Supervised Learning</a> &raquo; <a class="el" href="group__grp__regml.html">Regression Models</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b></p><ul>
+<li class="level1">
+<a href="#train">Training Function</a> </li>
+<li class="level1">
+<a href="#predict">Prediction Function</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>Generalized linear models extends ordinary linear regression by allowing the response variable to follow a more general set of distributions (rather than simply Gaussian distributions), and for a general family of functions of the response variable (the link function) to vary linearly with the predicted values (rather than assuming that the response itself must vary linearly).</p>
+<p>For example, count data would typically be modeled with a Poisson distribution and a log link, while binary outcomes would typically be modeled with a Bernoulli distribution (or a binomial distribution, depending on exactly how the problem is phrased) and a log-odds (or logit) link function.</p>
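+<p>In general form, a GLM with link function \( g \) relates the conditional mean of the response to the linear predictor: </p><p class="formulaDsp">
+\[ g(E[y \mid \boldsymbol x]) = \boldsymbol x^T \boldsymbol \beta. \]
+</p>
+<p> A Poisson model with log link, for instance, fits \( \log E[y \mid \boldsymbol x] = \boldsymbol x^T \boldsymbol \beta \).</p>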
+<p>Currently, the implemented distribution families are </p><center> <table class="doxtable">
+<tr>
+<th>Distribution Family </th><th>Link Functions  </th></tr>
+<tr>
+<td>Binomial </td><td>logit, probit  </td></tr>
+<tr>
+<td>Gamma </td><td>inverse, identity, log  </td></tr>
+<tr>
+<td>Gaussian </td><td>identity, inverse, log  </td></tr>
+<tr>
+<td>Inverse Gaussian </td><td>inverse of square, inverse, identity, log  </td></tr>
+<tr>
+<td>Poisson </td><td>log, identity, square-root<br />
+  </td></tr>
+</table>
+</center><p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd>The GLM training function has the following format: <pre class="syntax">
+glm(source_table,
+    model_table,
+    dependent_varname,
+    independent_varname,
+    family_params,
+    grouping_col,
+    optim_params,
+    verbose
+    )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>source_table </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the training data.</p>
+<p class="enddd"></p>
+</dd>
+<dt>model_table </dt>
+<dd><p class="startdd">TEXT. Name of the generated table containing the model.</p>
+<p>The model table produced by glm contains the following columns:</p>
+<table  class="output">
+<tr>
+<th>&lt;...&gt; </th><td><p class="starttd">Text. Grouping columns, if provided in input. This could be multiple columns depending on the <code>grouping_col</code> input. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>coef </th><td><p class="starttd">FLOAT8[]. Vector of the coefficients of the linear predictor. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>log_likelihood </th><td><p class="starttd">FLOAT8. The log-likelihood \( l(\boldsymbol \beta) \). The maximum likelihood estimate of the dispersion parameter is used to compute the log-likelihood, while R and Python use the deviance estimate and the Pearson estimate, respectively. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>std_err </th><td><p class="starttd">FLOAT8[]. Vector of the standard error of the coefficients. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>z_stats or t_stats </th><td><p class="starttd">FLOAT8[]. Vector of the z-statistics (for the Poisson and binomial distributions) or the t-statistics (for all other distributions) of the coefficients. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>p_values </th><td><p class="starttd">FLOAT8[]. Vector of the p-values of the coefficients. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>dispersion </th><td><p class="starttd">FLOAT8. The dispersion value (Pearson estimate). When family=poisson or family=binomial, the dispersion is always 1. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_rows_processed </th><td><p class="starttd">BIGINT. Number of rows processed. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_rows_skipped </th><td><p class="starttd">BIGINT. Number of rows skipped due to missing values or failures. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_iterations </th><td>INTEGER. The number of iterations actually completed. This can be less than the <code>max_iter</code> optimizer parameter if the <code>tolerance</code> criterion is met and the algorithm converges before all iterations are completed.  </td></tr>
+</table>
+<p>A summary table named &lt;model_table&gt;_summary is also created at the same time, which has the following columns: </p><table  class="output">
+<tr>
+<th>method </th><td><p class="starttd">'glm' </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>source_table </th><td><p class="starttd">The data source table name. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>model_table </th><td><p class="starttd">The model table name. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>dependent_varname </th><td><p class="starttd">The dependent variable. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>independent_varname </th><td><p class="starttd">The independent variables. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>family_params </th><td><p class="starttd">A string containing the family parameters, of the form 'family=..., link=...' </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>grouping_col </th><td><p class="starttd">Name of grouping columns. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>optimizer_params </th><td><p class="starttd">A string containing the optimizer parameters, of the form 'optimizer=..., max_iter=..., tolerance=...' </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_all_groups </th><td><p class="starttd">Number of groups in glm training. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>num_failed_groups </th><td><p class="starttd">Number of failed groups in glm training. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>total_rows_processed </th><td><p class="starttd">BIGINT. Total number of rows processed across all groups. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th>total_rows_skipped </th><td><p class="starttd">BIGINT. Total number of rows skipped across all groups due to missing values or failures. </p>
+<p class="endtd"></p>
+</td></tr>
+</table>
+<p class="enddd"></p>
+</dd>
+<dt>dependent_varname </dt>
+<dd><p class="startdd">TEXT. Name of the dependent variable column.</p>
+<p class="enddd"></p>
+</dd>
+<dt>independent_varname </dt>
+<dd><p class="startdd">TEXT. Expression list to evaluate for the independent variables. An intercept variable is not assumed. It is common to provide an explicit intercept term by including a single constant <code>1</code> term in the independent variable list.</p>
+<p class="enddd"></p>
+</dd>
+<dt>family_params (optional) </dt>
+<dd><p class="startdd">TEXT, Parameters for distribution family. Currently, we support</p>
+<p>(1) family=poisson and link=[log or identity or sqrt].</p>
+<p>(2) family=gaussian and link=[identity or log or inverse]. When family=gaussian and link=identity, the GLM model is identical to linear regression.</p>
+<p>(3) family=gamma and link=[inverse or identity or log].</p>
+<p>(4) family=inverse_gaussian and link=[sqr_inverse or log or identity or inverse].</p>
+<p>(5) family=binomial and link=[probit or logit]. </p>
+<p class="enddd"></p>
+</dd>
+<dt>grouping_col (optional) </dt>
+<dd><p class="startdd">TEXT, default: NULL. An expression list used to group the input dataset into discrete groups, running one regression per group. Similar to the SQL "GROUP BY" clause. When this value is NULL, no grouping is used and a single model is generated.</p>
+<p class="enddd"></p>
+</dd>
+<dt>optim_params (optional) </dt>
+<dd><p class="startdd">TEXT, default: 'max_iter=100,optimizer=irls,tolerance=1e-6'. Parameters for optimizer. Currently, we support tolerance=[tolerance for relative error between log-likelihoods], max_iter=[maximum iterations to run], optimizer=irls.</p>
+<p class="enddd"></p>
+</dd>
+<dt>verbose (optional) </dt>
+<dd>BOOLEAN, default: FALSE. Provides verbose output of the results of training. </dd>
+</dl>
+</dd></dl>
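+<p>For illustration, the following sketch combines grouping with the arguments above; it uses the warpbreaks_dummy table from the Examples section and an illustrative output table name, and fits one Poisson model per value of the wool column: </p><pre class="example">
+SELECT madlib.glm('warpbreaks_dummy',
+                  'glm_model_by_wool',
+                  'breaks',
+                  'ARRAY[1.0, "tension_M", "tension_H"]',
+                  'family=poisson, link=log',
+                  'wool');
+</pre>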
+<dl class="section note"><dt>Note</dt><dd>For p-values, we just return the computation result directly. Other statistical packages, like 'R', produce the same result, but on printing the result to screen, another format function is used and any p-value that is smaller than the machine epsilon (the smallest positive floating-point number 'x' such that '1 + x != 1') will be printed on screen as "&lt; xxx" (xxx is the value of the machine epsilon). Although the results may look different, they are in fact the same. </dd></dl>
+<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd>The prediction function is provided to estimate the conditional mean given a new predictor. It has the following syntax: <pre class="syntax">
+glm_predict(coef,
+            col_ind_var,
+            link)
+</pre></dd></dl>
+<p><b>Arguments</b> </p><dl class="arglist">
+<dt>coef </dt>
+<dd><p class="startdd">DOUBLE PRECISION[]. Model coefficients obtained from <a class="el" href="glm_8sql__in.html#a3f8eb219013e05675626acb8cf4612cc">glm()</a>.</p>
+<p class="enddd"></p>
+</dd>
+<dt>col_ind_var </dt>
+<dd><p class="startdd">New predictor, as a DOUBLE array. This should be the same length as the array obtained by evaluation of the 'independent_varname' argument in <a class="el" href="glm_8sql__in.html#a3f8eb219013e05675626acb8cf4612cc">glm()</a>.</p>
+<p class="enddd"></p>
+</dd>
+<dt>link </dt>
+<dd>Link function, as a string. This should match the link function specified when the model was trained with <a class="el" href="glm_8sql__in.html#a3f8eb219013e05675626acb8cf4612cc">glm()</a>. </dd>
+</dl>
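+<p>That is, given fitted coefficients \( \hat{\boldsymbol \beta} \) and a new predictor \( \boldsymbol x \), the returned estimate of the conditional mean is </p><p class="formulaDsp">
+\[ \hat{\mu} = g^{-1}(\boldsymbol x^T \hat{\boldsymbol \beta}), \]
+</p>
+<p> where \( g \) is the link function used during training.</p>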
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd><ol type="1">
+<li>Create the training data table. <pre class="example">
+CREATE TABLE warpbreaks(
+    id      serial,
+    breaks  integer,
+    wool    char(1),
+    tension char(1)
+);
+INSERT INTO warpbreaks(breaks, wool, tension) VALUES
+(26, 'A', 'L'),
+(30, 'A', 'L'),
+(54, 'A', 'L'),
+(25, 'A', 'L'),
+(70, 'A', 'L'),
+(52, 'A', 'L'),
+(51, 'A', 'L'),
+(26, 'A', 'L'),
+(67, 'A', 'L'),
+(18, 'A', 'M'),
+(21, 'A', 'M'),
+(29, 'A', 'M'),
+(17, 'A', 'M'),
+(12, 'A', 'M'),
+(18, 'A', 'M'),
+(35, 'A', 'M'),
+(30, 'A', 'M'),
+(36, 'A', 'M'),
+(36, 'A', 'H'),
+(21, 'A', 'H'),
+(24, 'A', 'H'),
+(18, 'A', 'H'),
+(10, 'A', 'H'),
+(43, 'A', 'H'),
+(28, 'A', 'H'),
+(15, 'A', 'H'),
+(26, 'A', 'H'),
+(27, 'B', 'L'),
+(14, 'B', 'L'),
+(29, 'B', 'L'),
+(19, 'B', 'L'),
+(29, 'B', 'L'),
+(31, 'B', 'L'),
+(41, 'B', 'L'),
+(20, 'B', 'L'),
+(44, 'B', 'L'),
+(42, 'B', 'M'),
+(26, 'B', 'M'),
+(19, 'B', 'M'),
+(16, 'B', 'M'),
+(39, 'B', 'M'),
+(28, 'B', 'M'),
+(21, 'B', 'M'),
+(39, 'B', 'M'),
+(29, 'B', 'M'),
+(20, 'B', 'H'),
+(21, 'B', 'H'),
+(24, 'B', 'H'),
+(17, 'B', 'H'),
+(13, 'B', 'H'),
+(15, 'B', 'H'),
+(15, 'B', 'H'),
+(16, 'B', 'H'),
+(28, 'B', 'H');
+SELECT create_indicator_variables('warpbreaks', 'warpbreaks_dummy', 'wool,tension');
+</pre></li>
+<li>Train a GLM model. <pre class="example">
+SELECT glm('warpbreaks_dummy',
+           'glm_model',
+           'breaks',
+           'ARRAY[1.0,"wool_B","tension_M", "tension_H"]',
+           'family=poisson, link=log');
+</pre></li>
+<li>View the regression results. <pre class="example">
+-- Set extended display on for easier reading of output
+\x on
+SELECT * FROM glm_model;
+</pre> Result: <pre class="result">
+coef               | {3.69196314494079,-0.205988442638621,-0.321320431600611,-0.51848849651156}
+log_likelihood     | -242.527983208979
+std_err            | {0.04541079434248,0.0515712427835191,0.0602659166951256,0.0639595193956924}
+z_stats            | {81.3014438174473,-3.99425011926316,-5.3317106786264,-8.10651020224019}
+p_values           | {0,6.48993254938271e-05,9.72918600322907e-08,5.20943463005751e-16}
+num_rows_processed | 54
+num_rows_skipped   | 0
+num_iterations     | 5
+</pre> Alternatively, unnest the arrays in the results for easier reading of output: <pre class="example">
+\x off
+SELECT unnest(coef) as coefficient,
+       unnest(std_err) as standard_error,
+       unnest(z_stats) as z_stat,
+       unnest(p_values) as pvalue
+FROM glm_model;
+</pre></li>
+<li>Predict the dependent variable using the GLM model. (This example uses the original data table to perform the prediction. Typically a different test dataset with the same features as the original training dataset would be used for prediction.) <pre class="example">
+\x off
+-- Display predicted mean value on the original dataset
+SELECT
+    w.id,
+    madlib.glm_predict(
+        coef,
+        ARRAY[1, "wool_B", "tension_M", "tension_H"]::float8[],
+        'log') AS mu
+FROM warpbreaks_dummy w, glm_model m
+ORDER BY w.id;
+</pre> <pre class="example">
+-- Display predicted counts (predicted mean values rounded to the nearest integer) on the original dataset
+SELECT
+    w.id,
+    madlib.glm_predict_poisson(
+        coef,
+        ARRAY[1, "wool_B", "tension_M", "tension_H"]::float8[],
+        'log') AS poisson_count
+FROM warpbreaks_dummy w, glm_model m
+ORDER BY w.id;
+</pre></li>
+</ol>
+</dd></dl>
+<p><b>Example for Gaussian family:</b></p>
+<ol type="1">
+<li>Create a test data table. <pre class="example">
+CREATE TABLE abalone (
+    id integer,
+    sex text,
+    length double precision,
+    diameter double precision,
+    height double precision,
+    whole double precision,
+    shucked double precision,
+    viscera double precision,
+    shell double precision,
+    rings integer
+);
+INSERT INTO abalone VALUES
+(3151, 'F', 0.655000000000000027, 0.505000000000000004, 0.165000000000000008, 1.36699999999999999, 0.583500000000000019, 0.351499999999999979, 0.396000000000000019, 10),
+(2026, 'F', 0.550000000000000044, 0.469999999999999973, 0.149999999999999994, 0.920499999999999985, 0.381000000000000005, 0.243499999999999994, 0.267500000000000016, 10),
+(3751, 'I', 0.434999999999999998, 0.375, 0.110000000000000001, 0.41549999999999998, 0.170000000000000012, 0.0759999999999999981, 0.14499999999999999, 8),
+(720, 'I', 0.149999999999999994, 0.100000000000000006, 0.0250000000000000014, 0.0149999999999999994, 0.00449999999999999966, 0.00400000000000000008, 0.0050000000000000001, 2),
+(1635, 'F', 0.574999999999999956, 0.469999999999999973, 0.154999999999999999, 1.1160000000000001, 0.509000000000000008, 0.237999999999999989, 0.340000000000000024, 10),
+(2648, 'I', 0.5, 0.390000000000000013, 0.125, 0.582999999999999963, 0.293999999999999984, 0.132000000000000006, 0.160500000000000004, 8),
+(1796, 'F', 0.57999999999999996, 0.429999999999999993, 0.170000000000000012, 1.47999999999999998, 0.65349999999999997, 0.32400000000000001, 0.41549999999999998, 10),
+(209, 'F', 0.525000000000000022, 0.41499999999999998, 0.170000000000000012, 0.832500000000000018, 0.275500000000000023, 0.168500000000000011, 0.309999999999999998, 13),
+(1451, 'I', 0.455000000000000016, 0.33500000000000002, 0.135000000000000009, 0.501000000000000001, 0.274000000000000021, 0.0995000000000000051, 0.106499999999999997, 7),
+(1108, 'I', 0.510000000000000009, 0.380000000000000004, 0.115000000000000005, 0.515499999999999958, 0.214999999999999997, 0.113500000000000004, 0.166000000000000009, 8),
+(3675, 'F', 0.594999999999999973, 0.450000000000000011, 0.165000000000000008, 1.08099999999999996, 0.489999999999999991, 0.252500000000000002, 0.279000000000000026, 12),
+(2108, 'F', 0.675000000000000044, 0.550000000000000044, 0.179999999999999993, 1.68849999999999989, 0.562000000000000055, 0.370499999999999996, 0.599999999999999978, 15),
+(3312, 'F', 0.479999999999999982, 0.380000000000000004, 0.135000000000000009, 0.507000000000000006, 0.191500000000000004, 0.13650000000000001, 0.154999999999999999, 12),
+(882, 'M', 0.655000000000000027, 0.520000000000000018, 0.165000000000000008, 1.40949999999999998, 0.585999999999999965, 0.290999999999999981, 0.405000000000000027, 9),
+(3402, 'M', 0.479999999999999982, 0.395000000000000018, 0.149999999999999994, 0.681499999999999995, 0.214499999999999996, 0.140500000000000014, 0.2495, 18),
+(829, 'I', 0.409999999999999976, 0.325000000000000011, 0.100000000000000006, 0.394000000000000017, 0.20799999999999999, 0.0655000000000000027, 0.105999999999999997, 6),
+(1305, 'M', 0.535000000000000031, 0.434999999999999998, 0.149999999999999994, 0.716999999999999971, 0.347499999999999976, 0.14449999999999999, 0.194000000000000006, 9),
+(3613, 'M', 0.599999999999999978, 0.46000000000000002, 0.179999999999999993, 1.1399999999999999, 0.422999999999999987, 0.257500000000000007, 0.364999999999999991, 10),
+(1068, 'I', 0.340000000000000024, 0.265000000000000013, 0.0800000000000000017, 0.201500000000000012, 0.0899999999999999967, 0.0475000000000000006, 0.0550000000000000003, 5),
+(2446, 'M', 0.5, 0.380000000000000004, 0.135000000000000009, 0.583500000000000019, 0.22950000000000001, 0.126500000000000001, 0.179999999999999993, 12),
+(1393, 'M', 0.635000000000000009, 0.474999999999999978, 0.170000000000000012, 1.19350000000000001, 0.520499999999999963, 0.269500000000000017, 0.366499999999999992, 10),
+(359, 'M', 0.744999999999999996, 0.584999999999999964, 0.214999999999999997, 2.49900000000000011, 0.92649999999999999, 0.471999999999999975, 0.699999999999999956, 17),
+(549, 'F', 0.564999999999999947, 0.450000000000000011, 0.160000000000000003, 0.79500000000000004, 0.360499999999999987, 0.155499999999999999, 0.23000000000000001, 12),
+(1154, 'F', 0.599999999999999978, 0.474999999999999978, 0.160000000000000003, 1.02649999999999997, 0.484999999999999987, 0.2495, 0.256500000000000006, 9),
+(1790, 'F', 0.54500000000000004, 0.385000000000000009, 0.149999999999999994, 1.11850000000000005, 0.542499999999999982, 0.244499999999999995, 0.284499999999999975, 9),
+(3703, 'F', 0.665000000000000036, 0.540000000000000036, 0.195000000000000007, 1.76400000000000001, 0.850500000000000034, 0.361499999999999988, 0.469999999999999973, 11),
+(1962, 'F', 0.655000000000000027, 0.515000000000000013, 0.179999999999999993, 1.41199999999999992, 0.619500000000000051, 0.248499999999999999, 0.496999999999999997, 11),
+(1665, 'I', 0.604999999999999982, 0.469999999999999973, 0.14499999999999999, 0.802499999999999991, 0.379000000000000004, 0.226500000000000007, 0.220000000000000001, 9),
+(635, 'M', 0.359999999999999987, 0.294999999999999984, 0.100000000000000006, 0.210499999999999993, 0.0660000000000000031, 0.0524999999999999981, 0.0749999999999999972, 9),
+(3901, 'M', 0.445000000000000007, 0.344999999999999973, 0.140000000000000013, 0.475999999999999979, 0.205499999999999988, 0.101500000000000007, 0.108499999999999999, 15),
+(2734, 'I', 0.41499999999999998, 0.33500000000000002, 0.100000000000000006, 0.357999999999999985, 0.169000000000000011, 0.067000000000000004, 0.104999999999999996, 7),
+(3856, 'M', 0.409999999999999976, 0.33500000000000002, 0.115000000000000005, 0.440500000000000003, 0.190000000000000002, 0.0850000000000000061, 0.135000000000000009, 8),
+(827, 'I', 0.395000000000000018, 0.28999999999999998, 0.0950000000000000011, 0.303999999999999992, 0.127000000000000002, 0.0840000000000000052, 0.076999999999999999, 6),
+(3381, 'I', 0.190000000000000002, 0.130000000000000004, 0.0449999999999999983, 0.0264999999999999993, 0.00899999999999999932, 0.0050000000000000001, 0.00899999999999999932, 5),
+(3972, 'I', 0.400000000000000022, 0.294999999999999984, 0.0950000000000000011, 0.252000000000000002, 0.110500000000000001, 0.0575000000000000025, 0.0660000000000000031, 6),
+(1155, 'M', 0.599999999999999978, 0.455000000000000016, 0.170000000000000012, 1.1915, 0.695999999999999952, 0.239499999999999991, 0.239999999999999991, 8),
+(3467, 'M', 0.640000000000000013, 0.5, 0.170000000000000012, 1.4544999999999999, 0.642000000000000015, 0.357499999999999984, 0.353999999999999981, 9),
+(2433, 'F', 0.609999999999999987, 0.484999999999999987, 0.165000000000000008, 1.08699999999999997, 0.425499999999999989, 0.232000000000000012, 0.380000000000000004, 11),
+(552, 'I', 0.614999999999999991, 0.489999999999999991, 0.154999999999999999, 0.988500000000000045, 0.41449999999999998, 0.195000000000000007, 0.344999999999999973, 13),
+(1425, 'F', 0.729999999999999982, 0.57999999999999996, 0.190000000000000002, 1.73750000000000004, 0.678499999999999992, 0.434499999999999997, 0.520000000000000018, 11),
+(2402, 'F', 0.584999999999999964, 0.41499999999999998, 0.154999999999999999, 0.69850000000000001, 0.299999999999999989, 0.145999999999999991, 0.195000000000000007, 12),
+(1748, 'F', 0.699999999999999956, 0.535000000000000031, 0.174999999999999989, 1.77299999999999991, 0.680499999999999994, 0.479999999999999982, 0.512000000000000011, 15),
+(3983, 'I', 0.57999999999999996, 0.434999999999999998, 0.149999999999999994, 0.891499999999999959, 0.362999999999999989, 0.192500000000000004, 0.251500000000000001, 6),
+(335, 'F', 0.739999999999999991, 0.599999999999999978, 0.195000000000000007, 1.97399999999999998, 0.597999999999999976, 0.408499999999999974, 0.709999999999999964, 16),
+(1587, 'I', 0.515000000000000013, 0.349999999999999978, 0.104999999999999996, 0.474499999999999977, 0.212999999999999995, 0.122999999999999998, 0.127500000000000002, 10),
+(2448, 'I', 0.275000000000000022, 0.204999999999999988, 0.0800000000000000017, 0.096000000000000002, 0.0359999999999999973, 0.0184999999999999991, 0.0299999999999999989, 6),
+(1362, 'F', 0.604999999999999982, 0.474999999999999978, 0.174999999999999989, 1.07600000000000007, 0.463000000000000023, 0.219500000000000001, 0.33500000000000002, 9),
+(2799, 'M', 0.640000000000000013, 0.484999999999999987, 0.149999999999999994, 1.09800000000000009, 0.519499999999999962, 0.222000000000000003, 0.317500000000000004, 10),
+(1413, 'F', 0.67000000000000004, 0.505000000000000004, 0.174999999999999989, 1.01449999999999996, 0.4375, 0.271000000000000019, 0.3745, 10),
+(1739, 'F', 0.67000000000000004, 0.540000000000000036, 0.195000000000000007, 1.61899999999999999, 0.739999999999999991, 0.330500000000000016, 0.465000000000000024, 11),
+(1152, 'M', 0.584999999999999964, 0.465000000000000024, 0.160000000000000003, 0.955500000000000016, 0.45950000000000002, 0.235999999999999988, 0.265000000000000013, 7),
+(2427, 'I', 0.564999999999999947, 0.434999999999999998, 0.154999999999999999, 0.782000000000000028, 0.271500000000000019, 0.16800000000000001, 0.284999999999999976, 14),
+(1777, 'M', 0.484999999999999987, 0.369999999999999996, 0.154999999999999999, 0.967999999999999972, 0.418999999999999984, 0.245499999999999996, 0.236499999999999988, 9),
+(3294, 'M', 0.574999999999999956, 0.455000000000000016, 0.184999999999999998, 1.15599999999999992, 0.552499999999999991, 0.242999999999999994, 0.294999999999999984, 13),
+(1403, 'M', 0.650000000000000022, 0.510000000000000009, 0.190000000000000002, 1.54200000000000004, 0.715500000000000025, 0.373499999999999999, 0.375, 9),
+(2256, 'M', 0.510000000000000009, 0.395000000000000018, 0.14499999999999999, 0.61850000000000005, 0.215999999999999998, 0.138500000000000012, 0.239999999999999991, 12),
+(3984, 'F', 0.584999999999999964, 0.450000000000000011, 0.125, 0.873999999999999999, 0.354499999999999982, 0.20749999999999999, 0.225000000000000006, 6),
+(1116, 'M', 0.525000000000000022, 0.405000000000000027, 0.119999999999999996, 0.755499999999999949, 0.3755, 0.155499999999999999, 0.201000000000000012, 9),
+(1366, 'M', 0.609999999999999987, 0.474999999999999978, 0.170000000000000012, 1.02649999999999997, 0.434999999999999998, 0.233500000000000013, 0.303499999999999992, 10),
+(3759, 'I', 0.525000000000000022, 0.400000000000000022, 0.140000000000000013, 0.605500000000000038, 0.260500000000000009, 0.107999999999999999, 0.209999999999999992, 9);
+</pre></li>
+<li>Train a model with family=gaussian and link=identity <pre class="example">
+SELECT madlib.glm(
+    'abalone',
+    'abalone_out',
+    'rings',
+    'ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]',
+    'family=gaussian, link=identity');
+</pre></li>
+</ol>
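+<p>As a sketch, the fitted mean ring counts can then be computed with <code>glm_predict</code> and the identity link (reusing the training table here purely for illustration): </p><pre class="example">
+SELECT madlib.glm_predict(
+    coef,
+    ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]::float8[],
+    'identity') AS predicted_rings
+FROM abalone_out, abalone;
+</pre>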
+<p><b>Example for Gamma family</b> (reusing the dataset from the Gaussian example):</p>
+<ol type="1">
+<li>Reuse the test dataset from the Gaussian example.</li>
+<li>Train a model with family=gamma and link=inverse <pre class="example">
+SELECT madlib.glm(
+    'abalone',
+    'abalone_out',
+    'rings',
+    'ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]',
+    'family=gamma, link=inverse');
+</pre></li>
+</ol>
+<p><b>Example for Inverse Gaussian family</b> (reusing the dataset from the Gaussian example):</p>
+<ol type="1">
+<li>Reuse the test dataset from the Gaussian example.</li>
+<li>Train a model with family=inverse_gaussian and link=sqr_inverse <pre class="example">
+SELECT madlib.glm(
+    'abalone',
+    'abalone_out',
+    'rings',
+    'ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]',
+    'family=inverse_gaussian, link=sqr_inverse');
+</pre></li>
+</ol>
+<p><b>Example for Binomial family</b> (reusing the dataset from the Gaussian example):</p>
+<ol type="1">
+<li>Reuse the test dataset from the Gaussian example.</li>
+<li>Train a model with family=binomial and link=probit <pre class="example">
+SELECT madlib.glm(
+    'abalone',
+    'abalone_out',
+    'rings &lt; 10',
+    'ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]',
+    'family=binomial, link=probit');
+</pre></li>
+<li>Predict output probabilities <pre class="example">
+SELECT madlib.glm_predict(
+    coef,
+    ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]::float8[],
+    'probit')
+FROM abalone_out, abalone;
+</pre></li>
+<li>Predict output categories <pre class="example">
+SELECT madlib.glm_predict_binomial(
+    coef,
+    ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]::float8[],
+    'probit')
+FROM abalone_out, abalone;
+</pre></li>
+</ol>
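+<p>A quick, illustrative accuracy check compares the predicted category with the actual value of <code>rings &lt; 10</code>: </p><pre class="example">
+-- fraction of correct category predictions (illustrative)
+SELECT avg(CASE WHEN madlib.glm_predict_binomial(
+                         coef,
+                         ARRAY[1, length, diameter, height, whole, shucked, viscera, shell]::float8[],
+                         'probit') = (rings &lt; 10)
+                THEN 1 ELSE 0 END) AS accuracy
+FROM abalone_out, abalone;
+</pre>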
+<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Notes</dt><dd>All table names can be optionally schema qualified (current_schemas() would be searched if a schema name is not provided) and all table and column names should follow case-sensitivity and quoting rules per the database. (For instance, 'mytable' and 'MyTable' both resolve to the same entity, i.e. 'mytable'. If mixed-case or multi-byte characters are desired for entity names then the string should be double-quoted; in this case the input would be '"MyTable"').</dd></dl>
+<p>The current implementation uses Newton's method and, according to performance tests, this GLM function can run slowly when the number of features exceeds 1000.</p>
+<p>The functions in <a class="el" href="group__grp__linreg.html">Linear Regression</a> are preferred to GLM with family=gaussian, link=identity, as the former require only a single pass over the training data. In addition, if the user expects to use robust variance, clustered variance, or marginal effects on top of the trained model, the functions in <a class="el" href="group__grp__linreg.html">Linear Regression</a> and <a class="el" href="group__grp__logreg.html">Logistic Regression</a> should be used.</p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="glm_8sql__in.html" title="SQL functions for GLM (Poisson) ">glm.sql_in</a> documenting the training function</p>
+<p><a class="el" href="group__grp__linreg.html">Linear Regression</a></p>
+<p><a class="el" href="group__grp__logreg.html">Logistic Regression</a></p>
+<p><a class="el" href="group__grp__mlogreg.html">Multinomial Logistic Regression</a></p>
+<p><a class="el" href="group__grp__robust.html">Robust Variance</a></p>
+<p><a class="el" href="group__grp__clustered__errors.html">Clustered Variance</a></p>
+<p><a class="el" href="group__grp__validation.html">Cross Validation</a></p>
+<p><a class="el" href="group__grp__marginal.html">Marginal Effects</a></p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__glm.js
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__glm.js b/docs/latest/group__grp__glm.js
new file mode 100644
index 0000000..a5e6eda
--- /dev/null
+++ b/docs/latest/group__grp__glm.js
@@ -0,0 +1,11 @@
+var group__grp__glm =
+[
+    [ "Linear Regression", "group__grp__linreg.html", null ],
+    [ "Logistic Regression", "group__grp__logreg.html", null ],
+    [ "Multinomial Logistic Regression", "group__grp__mlogreg.html", null ],
+    [ "Elastic Net Regularization", "group__grp__elasticnet.html", null ],
+    [ "Cox-Proportional Hazards Regression", "group__grp__cox__prop__hazards.html", null ],
+    [ "Robust Variance", "group__grp__robust.html", null ],
+    [ "Clustered Variance", "group__grp__clustered__errors.html", null ],
+    [ "Marginal Effects", "group__grp__marginal.html", null ]
+];
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__inf__stats.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__inf__stats.html b/docs/latest/group__grp__inf__stats.html
new file mode 100644
index 0000000..298bbe0
--- /dev/null
+++ b/docs/latest/group__grp__inf__stats.html
@@ -0,0 +1,140 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Inferential Statistics</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__inf__stats.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="summary">
+<a href="#groups">Modules</a>  </div>
+  <div class="headertitle">
+<div class="title">Inferential Statistics<div class="ingroups"><a class="el" href="group__grp__stats.html">Statistics</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
+<p>A collection of methods to compute inferential statistics on a dataset. </p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="groups"></a>
+Modules</h2></td></tr>
+<tr class="memitem:group__grp__stats__tests"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__stats__tests.html">Hypothesis Tests</a></td></tr>
+<tr class="memdesc:group__grp__stats__tests"><td class="mdescLeft">&#160;</td><td class="mdescRight">Provides functions to perform statistical hypothesis tests. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+</table>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__inf__stats.js
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__inf__stats.js b/docs/latest/group__grp__inf__stats.js
new file mode 100644
index 0000000..7bfc14a
--- /dev/null
+++ b/docs/latest/group__grp__inf__stats.js
@@ -0,0 +1,4 @@
+var group__grp__inf__stats =
+[
+    [ "Hypothesis Tests", "group__grp__stats__tests.html", null ]
+];
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__kernmach.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__kernmach.html b/docs/latest/group__grp__kernmach.html
new file mode 100644
index 0000000..d0f7ba4
--- /dev/null
+++ b/docs/latest/group__grp__kernmach.html
@@ -0,0 +1,361 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Support Vector Machines</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__kernmach.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Support Vector Machines<div class="ingroups"><a class="el" href="group__grp__early__stage.html">Early Stage Development</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li>
+<a href="#learn">Regression Learning Function</a> </li>
+<li>
+<a href="#classify">Classification Learning Function</a> </li>
+<li>
+<a href="#novelty">Novelty Detection Functions</a> </li>
+<li>
+<a href="#predict">Prediction Functions</a> </li>
+<li>
+<a href="#notes">Notes</a> </li>
+<li>
+<a href="#examples">Examples</a> </li>
+<li>
+<a href="#literature">Literature</a> </li>
+<li>
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><dl class="section warning"><dt>Warning</dt><dd><em> This MADlib method is still in early stage development. There may be some issues that will be addressed in a future version. Interface and implementation is subject to change. </em></dd></dl>
+<p>Support vector machines (SVMs) and related kernel methods have been among the most popular and well-studied machine learning techniques of the past 15 years, with an amazing number of innovations and applications.</p>
+<p>In a nutshell, an SVM model \(f(x)\) takes the form of </p><p class="formulaDsp">
+\[ f(x) = \sum_i \alpha_i k(x_i,x), \]
+</p>
+<p> where each \( \alpha_i \) is a real number, each \( \boldsymbol x_i \) is a data point from the training set (called a support vector), and \( k(\cdot, \cdot) \) is a kernel function that measures how "similar" two objects are. In regression, \( f(\boldsymbol x) \) is the regression function we seek. In classification, \( f(\boldsymbol x) \) serves as the decision boundary; so for example in binary classification, the predictor can output class 1 for object \(x\) if \( f(\boldsymbol x) \geq 0 \), and class 2 otherwise.</p>
+<p>In the case when the kernel function \( k(\cdot, \cdot) \) is the standard inner product on vectors, \( f(\boldsymbol x) \) is just an alternative way of writing a linear function </p><p class="formulaDsp">
+\[ f&#39;(\boldsymbol x) = \langle \boldsymbol w, \boldsymbol x \rangle, \]
+</p>
+<p> where \( \boldsymbol w \) is a weight vector having the same dimension as \( \boldsymbol x \). One of the key points of SVMs is that we can use more sophisticated kernel functions to efficiently learn linear models in high-dimensional feature spaces, since \( k(\boldsymbol x_i, \boldsymbol x_j) \) can be understood as an efficient way of computing an inner product in the feature space: </p><p class="formulaDsp">
+\[ k(\boldsymbol x_i, \boldsymbol x_j) = \langle \phi(\boldsymbol x_i), \phi(\boldsymbol x_j) \rangle, \]
+</p>
+<p> where \( \phi(\boldsymbol x) \) projects \( \boldsymbol x \) into a (possibly infinite-dimensional) feature space.</p>
+<p>There are many algorithms for learning kernel machines. This module implements the class of online learning with kernels algorithms described in Kivinen et al. [1]. It also includes the incremental gradient descent (IGD) method Feng et al. [3] for learning linear SVMs with the Hinge loss \(l(z) = \max(0, 1-z)\). See also the book Scholkopf and Smola [2] for more details of SVMs in general.</p>
+<p>The IGD implementation is based on the Bismarck project at the University of Wisconsin (<a href="http://hazy.cs.wisc.edu/hazy/victor/bismarck/">http://hazy.cs.wisc.edu/hazy/victor/bismarck/</a>). The methods introduced in [1] are implemented according to their original descriptions, except that we only update the support vector model when we make a significant error. The original algorithms in [1] update the support vector model at every step, even when no error is made, in the name of regularization. For practical purposes, and this has been verified empirically to a certain degree, updating only when necessary is both faster and better from a learning-theoretic point of view, at least in the i.i.d. setting.</p>
+<p>Methods for classification, regression and novelty detection are available. Multiple instances of the algorithms can be executed in parallel on different subsets of the training data. The resultant support vector models can then be combined using standard techniques like averaging or majority voting.</p>
+<p>Training data points are accessed via a table or a view. The support vector models can also be stored in tables for fast execution.</p>
+<p><a class="anchor" id="learn"></a></p><dl class="section user"><dt>Regression Learning Function</dt><dd></dd></dl>
+<p>Regression learning is achieved through the following function: </p><pre class="syntax">
+svm_regression( input_table,
+                model_table,
+                parallel,
+                kernel_func,
+                verbose DEFAULT false,
+                eta DEFAULT 0.1,
+                nu DEFAULT 0.005,
+                slambda DEFAULT 0.05,
+                kernel_param DEFAULT 1.0
+              )</pre><p>For classification and regression, the training table/view is expected to be of the following form (the array size of <em>ind</em> must not be greater than 102,400):<br />
+</p><pre>{TABLE|VIEW} input_table (
+    ...
+    id INT,
+    ind FLOAT8[],
+    label FLOAT8,
+    ...
+)</pre><p>For novelty detection, the label field is not required. Also note that the column names of input_table must be exactly as described above. This limitation will be removed when this module graduates from the early development stage.</p>
+<p><a class="anchor" id="classify"></a></p><dl class="section user"><dt>Classification Learning Function</dt><dd></dd></dl>
+<p>Classification learning is achieved through the following two functions:</p>
+<ul>
+<li>Learn linear SVM(s) using IGD [3]. <pre class="syntax">
+lsvm_classification( input_table,
+                     model_table,
+                     parallel DEFAULT false,
+                     verbose DEFAULT false,
+                     eta DEFAULT 0.1,
+                     reg DEFAULT 0.001,
+                     max_iter DEFAULT 100
+                   )
+</pre> Note that, like any gradient descent method, IGD converges with a larger eta (step size), and thus faster, if the input training data is well-conditioned. We highly recommend preparing the data so that the mean value of each feature column is 0 and the standard deviation is 1, and appending an extra feature with constant value 1 for the intercept term (see the sketch after this list). We plan to provide a function for this when this module graduates from the early development stage.</li>
+<li>Learn linear or non-linear SVM(s) using the method described in [1]. <pre class="syntax">
+svm_classification( input_table,
+                    model_table,
+                    parallel,
+                    kernel_func,
+                    verbose DEFAULT false,
+                    eta DEFAULT 0.1,
+                    nu DEFAULT 0.005,
+                    kernel_param DEFAULT 1.0
+                  )
+</pre></li>
+</ul>
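+<p>As a minimal sketch of the data preparation recommended above (assuming two-dimensional feature vectors; the view and table names are illustrative), the features can be standardized and a constant intercept term appended as follows: </p><pre class="example">
+CREATE VIEW my_scaled_input AS
+SELECT id,
+       ARRAY[ (ind[1] - s.mu1) / s.sd1,
+              (ind[2] - s.mu2) / s.sd2,
+              1.0 ]::FLOAT8[] AS ind,
+       label
+FROM my_input_table,
+     (SELECT avg(ind[1]) AS mu1, stddev(ind[1]) AS sd1,
+             avg(ind[2]) AS mu2, stddev(ind[2]) AS sd2
+      FROM my_input_table) AS s;
+</pre>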
+<p><a class="anchor" id="novelty"></a></p><dl class="section user"><dt>Novelty Detection Function</dt><dd></dd></dl>
+<p>Novelty detection is achieved through the following function: </p><pre class="syntax">
+svm_novelty_detection( input_table,
+                       model_table,
+                       parallel,
+                       kernel_func,
+                       verbose DEFAULT false,
+                       eta DEFAULT 0.1,
+                       nu DEFAULT 0.005,
+                       kernel_param DEFAULT 1.0
+                     )
+</pre><p>Assuming the model_table parameter takes on value 'model', each learning function will produce two tables as output: 'model' and 'model_param'. The first contains the support vectors of the model(s) learned. The second contains the parameters of the model(s) learned, which include information like the kernel function used and the value of the intercept, if there is one.</p>
+<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Functions</dt><dd></dd></dl>
+<ul>
+<li>To make predictions on new data points using a single model learned previously, we use the function <pre class="syntax">
+svm_predict_batch( input_table,
+                   data_col,
+                   id_col,
+                   model_table,
+                   output_table,
+                   parallel
+                 )
+</pre> If the <code>parallel</code> parameter is true, then each data point in the input table will have multiple predicted values, corresponding to the number of models learned in parallel.</li>
+<li>If the model is produced by the <a class="el" href="lsvm_8sql__in.html#a6dcddc88d70523ddda32b46ab82dfbbf" title="This is the linear support vector classification function. ">lsvm_classification()</a> function, use the following prediction function instead. <pre class="syntax">
+lsvm_predict_batch( input_table,
+                    data_col,
+                    id_col,
+                    model_table,
+                    output_table
+                  )
+</pre></li>
+<li>Note that, to make predictions on a subset of the data points stored in a table, a separate view or table needs to be created ahead of time: <pre class="example">
+-- create subset as a view
+CREATE VIEW subset AS SELECT * FROM input_table WHERE id &lt;= 100;
+-- prediction on the subset
+SELECT svm_predict_batch('subset', 'ind', 'id',
+    'svm_model', 'subset_svm_predict', false);
+-- prediction using linear SVMs
+SELECT lsvm_predict_batch('subset', 'ind', 'id',
+    'lsvm_model', 'subset_lsvm_predict');
+</pre></li>
+</ul>
+<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Notes</dt><dd></dd></dl>
+<p>The <code>kernel_func</code> argument of <code>svm_classification</code>, <code>svm_regression</code>, and <code>svm_novelty_detection</code> can only accept a kernel function in the following form:</p>
+<pre class="syntax">
+FLOAT8 kernel_function(FLOAT8[], FLOAT8[], FLOAT8)
+</pre><p>The first two parameters are feature vectors, and the third is a control parameter for the kernel function. The value of the control parameter must be set through the <code>kernel_param</code> argument of <code>svm_classification</code>, <code>svm_regression</code>, and <code>svm_novelty_detection</code>.</p>
+<p>Currently, three kernel functions have been implemented: linear or dot product (<a class="el" href="online__sv_8sql__in.html#ab53cac5790dafd7230359e08c72af4f1">svm_dot</a>), polynomial (<a class="el" href="online__sv_8sql__in.html#a1ac76fdf9623e0a4db47665f2a80be90">svm_polynomial</a>), and Gaussian (<a class="el" href="online__sv_8sql__in.html#a9f2a96e1a241ecc66386a78b110777d3">svm_gaussian</a>) kernels. Note that the dot product kernel actually requires only two FLOAT8[] parameters. To be compliant with the requirements for the kernel function, we have an overloaded version of <code>svm_dot</code> which accepts two FLOAT8[] and one FLOAT8 and returns a FLOAT8; the FLOAT8 parameter is simply a placeholder and is ignored.</p>
+<p>With the HAWQ database, only the above pre-built kernel functions can be used. With the Greenplum database and PostgreSQL database, one can use any user-defined function as long as it conforms to the requirements for the kernel function.</p>
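+<p>For illustration only, a user-defined kernel conforming to this signature might look like the following hedged sketch of a polynomial-style kernel; the schema and function name are hypothetical.</p>
+<pre class="example">
+-- Hypothetical kernel (x . y + 1)^degree with the required
+-- (FLOAT8[], FLOAT8[], FLOAT8) -&gt; FLOAT8 signature.
+CREATE OR REPLACE FUNCTION my_schema.my_poly_kernel(
+    x FLOAT8[], y FLOAT8[], degree FLOAT8)
+RETURNS FLOAT8 AS $$
+DECLARE
+    dot FLOAT8 := 0;
+BEGIN
+    FOR i IN 1 .. array_upper(x, 1) LOOP
+        dot := dot + x[i] * y[i];
+    END LOOP;
+    RETURN power(dot + 1, degree);
+END;
+$$ LANGUAGE plpgsql IMMUTABLE;
+</pre>
+<p>Such a function could then be passed by name, e.g. 'my_schema.my_poly_kernel', as the <code>kernel_func</code> argument, with <code>kernel_param</code> supplying the degree.</p>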
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<p>As a general first step, prepare and populate an input table/view with the following structure: </p><pre class="example">
+CREATE TABLE/VIEW my_schema.my_input_table(
+    id    INT,       -- point ID
+    ind   FLOAT8[],  -- data point
+    label FLOAT8     -- label of data point
+);
+</pre><p> The label field is not required for novelty detection.</p>
+<p><b>Example usage for regression</b>:</p><ol type="1">
+<li>Randomly generate 1000 5-dimensional data points labelled by the simple target function <pre class="example">
+t(x) = if x[5] = 10 then 50 else if x[5] = -10 then 50 else 0;
+</pre> and store that in the my_schema.my_train_data table as follows: <pre class="example">
+SELECT madlib.svm_generate_reg_data(
+    'my_schema.my_train_data', 1000, 5);
+</pre></li>
+<li>Learn a regression model and store the resultant model under the name 'myexp1'. <pre class="example">
+SELECT madlib.svm_regression('my_schema.my_train_data',
+    'myexp1', false, 'madlib.svm_dot');
+</pre></li>
+<li>To learn multiple support vector models, replace the learning step above with <pre class="example">
+SELECT madlib.svm_regression('my_schema.my_train_data',
+    'myexp2', true, 'madlib.svm_dot');
+</pre></li>
+<li>We can also predict the labels of data points stored in a table. For example, we can execute the following: <pre class="example">
+-- prepare test data
+CREATE TABLE madlib.svm_reg_test AS
+    SELECT id, ind
+    FROM my_schema.my_train_data
+    LIMIT 20;
+-- prediction using a single model
+SELECT madlib.svm_predict_batch('madlib.svm_reg_test', 'ind', 'id',
+    'myexp1', 'madlib.svm_reg_output1', false);
+SELECT * FROM madlib.svm_reg_output1;
+-- prediction using multiple models
+SELECT madlib.svm_predict_batch('madlib.svm_reg_test', 'ind', 'id',
+    'myexp2', 'madlib.svm_reg_output2', true);
+SELECT * FROM madlib.svm_reg_output2;
+</pre></li>
+</ol>
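+<p>To get a rough sense of the fit, the predictions can be joined back to the known labels. This is a hedged sketch that assumes the prediction output table has columns <code>(id, prediction)</code>; adjust the column names to your MADlib version.</p>
+<pre class="example">
+-- Mean squared error of the single-model predictions against the
+-- labels of the rows the test set was drawn from.
+SELECT avg((p.prediction - t.label)^2) AS mse
+FROM madlib.svm_reg_output1 p
+JOIN my_schema.my_train_data t USING (id);
+</pre>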
+<p><b>Example usage for classification:</b></p><ol type="1">
+<li>Randomly generate training and testing data labelled by the simple target function. <pre class="example">
+t(x) = if x[1] &gt; 0 and  x[2] &lt; 0 then 1 else -1;
+</pre> and store that in tables as follows: <pre class="example">
+SELECT madlib.svm_generate_cls_data(
+    'my_schema.my_train_data', 2000, 5);
+SELECT madlib.svm_generate_cls_data(
+    'my_schema.my_test_data', 3000, 5);
+</pre></li>
+<li>Learn a classification model and store the resultant model in the table 'myexpc'. <pre class="example">
+SELECT madlib.svm_classification('my_schema.my_train_data',
+    'myexpc', false, 'madlib.svm_dot');
+</pre></li>
+<li>Start using the model to predict the labels of testing data points. <pre class="example">
+SELECT madlib.svm_predict_batch('my_schema.my_test_data', 'ind', 'id',
+    'myexpc', 'my_schema.svm_cls_output1', false);
+</pre></li>
+<li>To learn multiple support vector models, replace the model-building and prediction steps above as follows: <pre class="example">
+-- training
+SELECT madlib.svm_classification('my_schema.my_train_data',
+    'myexpc', true, 'madlib.svm_dot');
+-- predicting
+SELECT madlib.svm_predict_batch('my_schema.my_test_data', 'ind', 'id',
+    'myexpc', 'my_schema.svm_cls_output1', true);
+</pre></li>
+<li>To learn a linear support vector model using IGD [3], replace the model-building and prediction steps as follows: <pre class="example">
+-- training
+SELECT madlib.lsvm_classification(
+    'my_schema.my_train_data', 'my_lsvm');
+-- predicting
+SELECT madlib.lsvm_predict_batch('my_schema.my_test_data',
+    'ind', 'id', 'my_lsvm', 'my_lsvm_predict');
+</pre></li>
+</ol>
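+<p>Classification quality can be checked in the same way by comparing predicted signs with the true labels. Again, this sketch assumes the output table has columns <code>(id, prediction)</code>; adjust as needed.</p>
+<pre class="example">
+-- Fraction of test points whose predicted sign matches the
+-- true +1/-1 label.
+SELECT avg(CASE WHEN sign(p.prediction) = sign(t.label)
+                THEN 1.0 ELSE 0.0 END) AS accuracy
+FROM my_schema.svm_cls_output1 p
+JOIN my_schema.my_test_data t USING (id);
+</pre>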
+<p><b>Example usage for novelty detection:</b></p>
+<ol type="1">
+<li>Randomly generate 100 2-dimensional data points (the normal cases) and store them in the my_schema.my_train_data table. <pre class="example">
+SELECT madlib.svm_generate_nd_data(
+    'my_schema.my_train_data', 100, 2);
+</pre></li>
+<li>Learning and predicting using a single novelty detection model: <pre class="example">
+SELECT madlib.svm_novelty_detection( 'my_schema.my_train_data',
+                                    'myexpnd',
+                                    false,
+                                    'madlib.svm_dot'
+                                  );
+SELECT * FROM myexpnd;
+</pre></li>
+<li>Learning and predicting using multiple models can be done as follows: <pre class="example">
+SELECT madlib.svm_novelty_detection( 'my_schema.my_train_data',
+                                    'myexpnd',
+                                    true,
+                                    'madlib.svm_dot'
+                                  );
+SELECT * FROM myexpnd;
+</pre></li>
+</ol>
+<p><b>Model cleanup:</b> To drop all tables pertaining to the model, use: </p><pre class="example">
+SELECT svm_drop_model('model_table');
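+-- for example, to drop the classification model trained above:
+SELECT svm_drop_model('myexpc');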
+</pre><p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] Jyrki Kivinen, Alexander J. Smola, and Robert C. Williamson: <em>Online Learning with Kernels</em>, IEEE Transactions on Signal Processing, 52(8), 2165-2176, 2004.</p>
+<p>[2] Bernhard Scholkopf and Alexander J. Smola: <em>Learning with Kernels: Support Vector Machines, Regularization, Optimization, and Beyond</em>, MIT Press, 2002.</p>
+<p>[3] X. Feng, A. Kumar, B. Recht, and C. R&eacute;: <em>Towards a unified architecture for in-RDBMS analytics</em>, In SIGMOD Conference, 2012.</p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="online__sv_8sql__in.html" title="SQL functions for support vector machines. ">online_sv.sql_in</a> and <a class="el" href="lsvm_8sql__in.html" title="SQL functions for linear support vector machines. ">lsvm.sql_in</a> documenting the SQL functions.</p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__kmeans.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__kmeans.html b/docs/latest/group__grp__kmeans.html
new file mode 100644
index 0000000..83e9b1e
--- /dev/null
+++ b/docs/latest/group__grp__kmeans.html
@@ -0,0 +1,414 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: k-Means Clustering</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__kmeans.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">k-Means Clustering<div class="ingroups"><a class="el" href="group__grp__unsupervised.html">Unsupervised Learning</a> &raquo; <a class="el" href="group__grp__clustering.html">Clustering</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li class="level1">
+<a href="#train">Training Function</a> </li>
+<li class="level1">
+<a href="#output">Output Format</a> </li>
+<li class="level1">
+<a href="#assignment">Cluster Assignment</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#notes">Notes</a> </li>
+<li class="level1">
+<a href="#background">Technical Background</a> </li>
+<li class="level1">
+<a href="#literature">Literature</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>Clustering refers to the problem of partitioning a set of objects according to some problem-dependent measure of <em>similarity</em>. In the k-means variant, given \( n \) points \( x_1, \dots, x_n \in \mathbb R^d \), the goal is to position \( k \) centroids \( c_1, \dots, c_k \in \mathbb R^d \) so that the sum of <em>distances</em> between each point and its closest centroid is minimized. Each centroid represents a cluster that consists of all points to which this centroid is closest.</p>
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd></dd></dl>
+<p>The k-means algorithm can be invoked in four ways, depending on the source of the initial set of centroids:</p>
+<ul>
+<li>Use the random centroid seeding method. <pre class="syntax">
+kmeans_random( rel_source,
+               expr_point,
+               k,
+               fn_dist,
+               agg_centroid,
+               max_num_iterations,
+               min_frac_reassigned
+             )
+</pre></li>
+<li>Use the kmeans++ centroid seeding method. <pre class="syntax">
+kmeanspp( rel_source,
+          expr_point,
+          k,
+          fn_dist,
+          agg_centroid,
+          max_num_iterations,
+          min_frac_reassigned,
+          seeding_sample_ratio
+        )
+</pre></li>
+<li>Supply an initial centroid set in a relation identified by the <em>rel_initial_centroids</em> argument. <pre class="syntax">
+kmeans( rel_source,
+        expr_point,
+        rel_initial_centroids,
+        expr_centroid,
+        fn_dist,
+        agg_centroid,
+        max_num_iterations,
+        min_frac_reassigned
+      )
+</pre></li>
+<li>Provide an initial centroid set as an array expression in the <em>initial_centroids</em> argument. <pre class="syntax">
+kmeans( rel_source,
+        expr_point,
+        initial_centroids,
+        fn_dist,
+        agg_centroid,
+        max_num_iterations,
+        min_frac_reassigned
+      )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>rel_source </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the input data points.</p>
+<p>Data points and predefined centroids (if used) are expected to be stored row-wise, in a column of type <code><a class="el" href="group__grp__svec.html">SVEC</a></code> (or any type convertible to <code><a class="el" href="group__grp__svec.html">SVEC</a></code>, like <code>FLOAT[]</code> or <code>INTEGER[]</code>). Data points with non-finite values (NULL, NaN, infinity) in any component are skipped during analysis. </p>
+<p class="enddd"></p>
+</dd>
+<dt>expr_point </dt>
+<dd><p class="startdd">TEXT. The name of the column with point coordinates.</p>
+<p class="enddd"></p>
+</dd>
+<dt>k </dt>
+<dd><p class="startdd">INTEGER. The number of centroids to calculate.</p>
+<p class="enddd"></p>
+</dd>
+<dt>fn_dist (optional) </dt>
+<dd><p class="startdd">TEXT, default: squared_dist_norm2'. The name of the function to use to calculate the distance from a data point to a centroid.</p>
+<p>The following distance functions can be used (computation of barycenter/mean in parentheses): </p><ul>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#aad193850e79c4b9d811ca9bc53e13476">dist_norm1</a></b>: 1-norm/Manhattan (element-wise median [Note that MADlib does not provide a median aggregate function for support and performance reasons.]) </li>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#aa58e51526edea6ea98db30b6f250adb4">dist_norm2</a></b>: 2-norm/Euclidean (element-wise mean) </li>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#a00a08e69f27524f2096032214e15b668">squared_dist_norm2</a></b>: squared Euclidean distance (element-wise mean) </li>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#a8c7b9281a72ff22caf06161701b27e84">dist_angle</a></b>: angle (element-wise mean of normalized points) </li>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#afa13b4c6122b99422d666dedea136c18">dist_tanimoto</a></b>: tanimoto (element-wise mean of normalized points <a href="#kmeans-lit-5">[5]</a>) </li>
+<li>
+<b>user-defined function</b> with signature <code>DOUBLE PRECISION[] x, DOUBLE PRECISION[] y -&gt; DOUBLE PRECISION</code> (a hypothetical sketch follows this argument list)</li>
+</ul>
+<p class="enddd"></p>
+</dd>
+<dt>agg_centroid (optional) </dt>
+<dd><p class="startdd">TEXT, default: 'avg'. The name of the aggregate function used to determine centroids.</p>
+<p>The following aggregate functions can be used:</p><ul>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#a1aa37f73fb1cd8d7d106aa518dd8c0b4">avg</a></b>: average (Default) </li>
+<li>
+<b><a class="el" href="linalg_8sql__in.html#a0b04663ca206f03e66aed5ea2b4cc461">normalized_avg</a></b>: normalized average</li>
+</ul>
+<p class="enddd"></p>
+</dd>
+<dt>max_num_iterations (optional) </dt>
+<dd><p class="startdd">INTEGER, default: 20. The maximum number of iterations to perform.</p>
+<p class="enddd"></p>
+</dd>
+<dt>min_frac_reassigned (optional) </dt>
+<dd><p class="startdd">DOUBLE PRECISION, default: 0.001. The minimum fraction of centroids reassigned to continue iterating. When fewer than this fraction of centroids are reassigned in an iteration, the calculation completes.</p>
+<p class="enddd"></p>
+</dd>
+<dt>seeding_sample_ratio (optional) </dt>
+<dd><p class="startdd">DOUBLE PRECISION, default: 1.0. The proportion of subsample of original dataset to use for kmeans++ centroid seeding method. Kmeans++ scans through the data sequentially 'k' times and can be too slow for big datasets. When 'seeding_sample_ratio' is greater than 0 (thresholded to be maximum value of 1.0), the seeding is run on an uniform random subsample of the data. Note: the final K-means algorithm is run on the complete dataset. This parameter only builds a subsample for the seeding and is only available for kmeans++.</p>
+<p class="enddd"></p>
+</dd>
+<dt>rel_initial_centroids </dt>
+<dd><p class="startdd">TEXT. The set of initial centroids. The centroid relation is expected to be of the following form: </p><pre>
+{TABLE|VIEW} rel_initial_centroids (
+    ...
+    expr_centroid DOUBLE PRECISION[],
+    ...
+)
+</pre><p> where <em>expr_centroid</em> is the name of a column with coordinates. </p>
+<p class="enddd"></p>
+</dd>
+<dt>expr_centroid </dt>
+<dd><p class="startdd">TEXT. The name of the column in the <em>rel_initial_centroids</em> relation that contains the centroid coordinates.</p>
+<p class="enddd"></p>
+</dd>
+<dt>initial_centroids </dt>
+<dd>TEXT. A string containing a DOUBLE PRECISION array expression with the initial centroid coordinates. </dd>
+</dl>
+</li>
+</ul>
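+<p>For reference, a user-defined distance function matching the signature noted under <em>fn_dist</em> could be written as in this hedged sketch (squared Euclidean distance spelled out explicitly; the name is hypothetical):</p>
+<pre class="example">
+-- Hypothetical distance function with the required
+-- (DOUBLE PRECISION[], DOUBLE PRECISION[]) -&gt; DOUBLE PRECISION shape.
+CREATE OR REPLACE FUNCTION my_schema.my_sq_dist(
+    x DOUBLE PRECISION[], y DOUBLE PRECISION[])
+RETURNS DOUBLE PRECISION AS $$
+DECLARE
+    d DOUBLE PRECISION := 0;
+BEGIN
+    FOR i IN 1 .. array_upper(x, 1) LOOP
+        d := d + (x[i] - y[i]) * (x[i] - y[i]);
+    END LOOP;
+    RETURN d;
+END;
+$$ LANGUAGE plpgsql IMMUTABLE;
+</pre>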
+<p><a class="anchor" id="output"></a></p><dl class="section user"><dt>Output Format</dt><dd></dd></dl>
+<p>The output of the k-means module is a composite type with the following columns: </p><table  class="output">
+<tr>
+<th>centroids </th><td>DOUBLE PRECISION[][]. The final centroid positions.  </td></tr>
+<tr>
+<th>objective_fn </th><td>DOUBLE PRECISION. The value of the objective function.  </td></tr>
+<tr>
+<th>frac_reassigned </th><td>DOUBLE PRECISION. The fraction of points reassigned in the last iteration.  </td></tr>
+<tr>
+<th>num_iterations </th><td>INTEGER. The total number of iterations executed.  </td></tr>
+</table>
+<p><a class="anchor" id="assignment"></a></p><dl class="section user"><dt>Cluster Assignment</dt><dd></dd></dl>
+<p>After training, the cluster assignment for each data point can be computed with the help of the following function:</p>
+<pre class="syntax">
+closest_column( m, x )
+</pre><p><b>Argument</b> </p><dl class="arglist">
+<dt>m </dt>
+<dd>DOUBLE PRECISION[][]. The learned centroids from the training function. </dd>
+<dt>x </dt>
+<dd>DOUBLE PRECISION[]. The data point. </dd>
+</dl>
+<p><b>Output format</b> </p><table  class="output">
+<tr>
+<th>column_id </th><td>INTEGER. The cluster assignment (zero-based). </td></tr>
+<tr>
+<th>distance </th><td>DOUBLE PRECISION. The distance to the cluster centroid. </td></tr>
+</table>
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>Prepare some input data. <pre class="example">
+CREATE TABLE public.km_sample(pid int, points double precision[]);
+COPY km_sample (pid, points) FROM stdin DELIMITER '|';
+1 | {14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.0600, 0.2800, 2.29, 5.64, 1.04, 3.92, 1065}
+2 | {13.2, 1.78, 2.14, 11.2, 1, 2.65, 2.76, 0.26, 1.28, 4.38, 1.05, 3.49, 1050}
+3 | {13.16, 2.36,  2.67, 18.6, 101, 2.8,  3.24, 0.3, 2.81, 5.6799, 1.03, 3.17, 1185}
+4 | {14.37, 1.95, 2.5, 16.8, 113, 3.85, 3.49, 0.24, 2.18, 7.8, 0.86, 3.45, 1480}
+5 | {13.24, 2.59, 2.87, 21, 118, 2.8, 2.69, 0.39, 1.82, 4.32, 1.04, 2.93, 735}
+6 | {14.2, 1.76, 2.45, 15.2, 112, 3.27, 3.39, 0.34, 1.97, 6.75, 1.05, 2.85, 1450}
+7 | {14.39, 1.87, 2.45, 14.6, 96, 2.5, 2.52, 0.3, 1.98, 5.25, 1.02, 3.58, 1290}
+8 | {14.06, 2.15, 2.61, 17.6, 121, 2.6, 2.51, 0.31, 1.25, 5.05, 1.06, 3.58, 1295}
+9 | {14.83, 1.64, 2.17, 14, 97, 2.8, 2.98, 0.29, 1.98, 5.2, 1.08, 2.85, 1045}
+10 | {13.86, 1.35, 2.27, 16, 98, 2.98, 3.15, 0.22, 1.8500, 7.2199, 1.01, 3.55, 1045}
+\.
+</pre></li>
+<li>Run k-means clustering using kmeans++ for centroid seeding: <pre class="example">
+\x on;
+SELECT * FROM madlib.kmeanspp( 'km_sample',
+                               'points',
+                               2,
+                               'madlib.squared_dist_norm2',
+                               'madlib.avg',
+                               20,
+                               0.001
+                             );
+</pre> Result: <pre class="result">
+centroids       | {{13.872,1.814,2.376,15.56,88.2,2.806,2.928,0.288,1.844,5.35198,1.044,3.348,988},
+                   {14.036,2.018,2.536,16.56,108.6,3.004,3.03,0.298,2.038,6.10598,1.004,3.326,1340}}
+objective_fn    | 151184.962672
+frac_reassigned | 0
+num_iterations  | 2
+</pre></li>
+<li>Calculate the simplified silhouette coefficient: <pre class="example">
+SELECT * FROM madlib.simple_silhouette( 'km_sample',
+                                        'points',
+                                        (SELECT centroids FROM
+                                            madlib.kmeanspp('km_sample',
+                                                            'points',
+                                                            2,
+                                                            'madlib.squared_dist_norm2',
+                                                            'madlib.avg',
+                                                            20,
+                                                            0.001)),
+                                        'madlib.dist_norm2'
+                                      );
+</pre> Result: <pre class="result">
+simple_silhouette | 0.68978804882941
+</pre></li>
+<li>Find the cluster assignment for each point <pre class="example">
+\x off;
+SELECT data.*, (madlib.closest_column(centroids, points)).column_id as cluster_id
+FROM public.km_sample as data,
+     (SELECT centroids
+      FROM madlib.kmeanspp('km_sample', 'points', 2,
+                           'madlib.squared_dist_norm2',
+                           'madlib.avg', 20, 0.001)) as centroids
+ORDER BY data.pid;
+</pre> <pre class="result">
+ pid |                               points                               | cluster_id
+-----+--------------------------------------------------------------------+------------
+   1 | {14.23,1.71,2.43,15.6,127,2.8,3.06,0.28,2.29,5.64,1.04,3.92,1065}  |          0
+   2 | {13.2,1.78,2.14,11.2,1,2.65,2.76,0.26,1.28,4.38,1.05,3.49,1050}    |          0
+   3 | {13.16,2.36,2.67,18.6,101,2.8,3.24,0.3,2.81,5.6799,1.03,3.17,1185} |          1
+   4 | {14.37,1.95,2.5,16.8,113,3.85,3.49,0.24,2.18,7.8,0.86,3.45,1480}   |          1
+   5 | {13.24,2.59,2.87,21,118,2.8,2.69,0.39,1.82,4.32,1.04,2.93,735}     |          0
+   6 | {14.2,1.76,2.45,15.2,112,3.27,3.39,0.34,1.97,6.75,1.05,2.85,1450}  |          1
+   7 | {14.39,1.87,2.45,14.6,96,2.5,2.52,0.3,1.98,5.25,1.02,3.58,1290}    |          1
+   8 | {14.06,2.15,2.61,17.6,121,2.6,2.51,0.31,1.25,5.05,1.06,3.58,1295}  |          1
+   9 | {14.83,1.64,2.17,14,97,2.8,2.98,0.29,1.98,5.2,1.08,2.85,1045}      |          0
+  10 | {13.86,1.35,2.27,16,98,2.98,3.15,0.22,1.85,7.2199,1.01,3.55,1045}  |          0
+</pre></li>
+</ol>
+<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Notes</dt><dd></dd></dl>
+<p>The algorithm stops when one of the following conditions is met:</p><ul>
+<li>The fraction of updated points is smaller than the convergence threshold (<em>min_frac_reassigned</em> argument). (Default: 0.001).</li>
+<li>The algorithm reaches the maximum number of allowed iterations (<em>max_num_iterations</em> argument). (Default: 20).</li>
+</ul>
+<p>A popular method to assess the quality of the clustering is the <em>silhouette coefficient</em>, a simplified version of which is provided as part of the k-means module. Note that for large data sets, this computation is expensive.</p>
+<p>The silhouette function has the following syntax: </p><pre class="syntax">
+simple_silhouette( rel_source,
+                   expr_point,
+                   centroids,
+                   fn_dist
+                 )
+</pre><p> <b>Arguments</b> </p><dl class="arglist">
+<dt>rel_source </dt>
+<dd>TEXT. The name of the relation containing the input points. </dd>
+<dt>expr_point </dt>
+<dd>TEXT. An expression evaluating to point coordinates for each row in the relation. </dd>
+<dt>centroids </dt>
+<dd>TEXT. An expression evaluating to an array of centroids.  </dd>
+<dt>fn_dist (optional) </dt>
+<dd>TEXT, default: 'dist_norm2'. The name of a function to calculate the distance of a point from a centroid. See the <em>fn_dist</em> argument of the k-means training function. </dd>
+</dl>
+<p><a class="anchor" id="background"></a></p><dl class="section user"><dt>Technical Background</dt><dd></dd></dl>
+<p>Formally, we wish to minimize the following objective function: </p><p class="formulaDsp">
+\[ (c_1, \dots, c_k) \mapsto \sum_{i=1}^n \min_{j=1}^k \operatorname{dist}(x_i, c_j) \]
+</p>
+<p> In the most common case, \( \operatorname{dist} \) is the square of the Euclidean distance.</p>
+<p>This problem is computationally difficult (NP-hard), yet the local-search heuristic proposed by Lloyd [4] performs reasonably well in practice. In fact, it is so ubiquitous today that it is often referred to as the <em>standard algorithm</em> or even just the <em>k-means algorithm</em> [1]. It works as follows:</p>
+<ol type="1">
+<li>Seed the \( k \) centroids (see below)</li>
+<li>Repeat until convergence:<ol type="a">
+<li>Assign each point to its closest centroid</li>
+<li>Move each centroid to a position that minimizes the sum of distances in this cluster</li>
+</ol>
+</li>
+<li>Convergence is achieved when no points change their assignments during step 2a.</li>
+</ol>
+<p>Since the objective function decreases in every step, this algorithm is guaranteed to converge to a local optimum.</p>
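+<p>For intuition, a single Lloyd iteration can be expressed directly in SQL. The sketch below assumes a hypothetical two-dimensional layout with points stored as rows <code>(pid, x, y)</code> and current centroids as <code>(cid, cx, cy)</code>; MADlib's implementation instead operates on array-valued columns.</p>
+<pre class="example">
+-- One Lloyd iteration: (a) assign each point to its nearest
+-- centroid, (b) recompute each centroid as the element-wise mean.
+CREATE TABLE next_centroids AS
+SELECT cid, avg(x) AS cx, avg(y) AS cy
+FROM (
+    SELECT p.x, p.y,
+           (SELECT c.cid
+            FROM centroids c
+            ORDER BY (p.x - c.cx)^2 + (p.y - c.cy)^2
+            LIMIT 1) AS cid
+    FROM points p
+) assigned
+GROUP BY cid;
+</pre>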
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p><a class="anchor" id="kmeans-lit-1"></a>[1] Wikipedia, K-means Clustering, <a href="http://en.wikipedia.org/wiki/K-means_clustering">http://en.wikipedia.org/wiki/K-means_clustering</a></p>
+<p><a class="anchor" id="kmeans-lit-2"></a>[2] David Arthur, Sergei Vassilvitskii: k-means++: the advantages of careful seeding, Proceedings of the 18th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA'07), pp. 1027-1035, <a href="http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf">http://www.stanford.edu/~darthur/kMeansPlusPlus.pdf</a></p>
+<p><a class="anchor" id="kmeans-lit-3"></a>[3] E. R. Hruschka, L. N. C. Silva, R. J. G. B. Campello: Clustering Gene-Expression Data: A Hybrid Approach that Iterates Between k-Means and Evolutionary Search. In: Studies in Computational Intelligence - Hybrid Evolutionary Algorithms. pp. 313-335. Springer. 2007.</p>
+<p><a class="anchor" id="kmeans-lit-4"></a>[4] Lloyd, Stuart: Least squares quantization in PCM. Technical Note, Bell Laboratories. Published much later in: IEEE Transactions on Information Theory 28(2), pp. 128-137. 1982.</p>
+<p><a class="anchor" id="kmeans-lit-5"></a>[5] Leisch, Friedrich: A Toolbox for K-Centroids Cluster Analysis. In: Computational Statistics and Data Analysis, 51(2). pp. 526-544. 2006.</p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p>File <a class="el" href="kmeans_8sql__in.html" title="Set of functions for k-means clustering. ">kmeans.sql_in</a> documenting the k-Means SQL functions</p>
+<p><a class="el" href="group__grp__svec.html">Sparse Vectors</a></p>
+<p>simple_silhouette()</p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>