Posted to commits@madlib.apache.org by ri...@apache.org on 2016/03/30 02:58:51 UTC

[11/51] [partial] incubator-madlib-site git commit: Add all files from old site (madlib.net)

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__lda.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__lda.html b/docs/latest/group__grp__lda.html
new file mode 100644
index 0000000..ab5e82a
--- /dev/null
+++ b/docs/latest/group__grp__lda.html
@@ -0,0 +1,462 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Latent Dirichlet Allocation</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__lda.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Latent Dirichlet Allocation<div class="ingroups"><a class="el" href="group__grp__unsupervised.html">Unsupervised Learning</a> &raquo; <a class="el" href="group__grp__topic__modelling.html">Topic Modelling</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li>
+<a href="#vocabulary">Vocabulary Format</a> </li>
+<li>
+<a href="#train">Training Function</a> </li>
+<li>
+<a href="#predict">Prediction Function</a> </li>
+<li>
+<a href="#examples">Examples</a> </li>
+<li>
+<a href="#notes">Notes</a> </li>
+<li>
+<a href="#literature">Literature</a> </li>
+<li>
+<a href="#related">Related Topics</a></li>
+</ul>
+</div><p>Latent Dirichlet Allocation (LDA) is a generative probabilistic model for natural-language text that has received considerable attention in recent years. The model is versatile, with applications in automated topic discovery, collaborative filtering, and document classification.</p>
+<p>The LDA model posits that each document is associated with a mixture of various topics (e.g. a document is related to Topic 1 with probability 0.7, and Topic 2 with probability 0.3), and that each word in the document is attributable to one of the document's topics. There is a (symmetric) Dirichlet prior with parameter \( \alpha \) on each document's topic mixture. In addition, there is another (symmetric) Dirichlet prior with parameter \( \beta \) on the distribution of words for each topic.</p>
+<p>The following generative process then defines a distribution over a corpus of documents.</p>
+<ul>
+<li>For each topic \( i \), sample a per-topic word distribution \( \phi_i \) from the Dirichlet(\(\beta\)) prior.</li>
+<li>For each document:<ul>
+<li>Sample a document length N from a suitable distribution, say, Poisson.</li>
+<li>Sample a topic mixture \( \theta \) for the document from the Dirichlet( \(\alpha\)) distribution.</li>
+<li>For each of the N words:<ul>
+<li>Sample a topic \( z_n \) from the multinomial topic distribution \( \theta \).</li>
+<li>Sample a word \( w_n \) from the multinomial word distribution \( \phi_{z_n} \) associated with topic \( z_n \).</li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
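+<p>Taken together, these steps correspond (in standard LDA notation, given here only as a summary of the process above and not notation specific to this module) to the joint distribution</p>
+<p>\[ P(\mathbf{w}, \mathbf{z}, \theta, \phi \mid \alpha, \beta) = \prod_i P(\phi_i \mid \beta) \; \prod_d P(\theta_d \mid \alpha) \prod_{n=1}^{N_d} P(z_{d,n} \mid \theta_d) \, P(w_{d,n} \mid \phi_{z_{d,n}}), \]</p>
+<p>where \( d \) indexes documents, \( n \) indexes the word positions within document \( d \), and \( N_d \) is the length of document \( d \).</p>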
+<p>In practice, only the words in each document are observable. The topic mixture of each document and the topic assigned to each word are latent variables that must be inferred from the observed words; this is what is meant by the inference problem for LDA. Exact inference is intractable, but several approximate inference algorithms have been developed. The simple and effective Gibbs sampling algorithm described in Griffiths and Steyvers [2] is currently the algorithm of choice.</p>
+<p>This implementation provides a parallel and scalable in-database solution for LDA based on Gibbs sampling. Unlike implementations based on MPI or Hadoop MapReduce, it builds on shared-nothing MPP databases and enables high-performance in-database analytics.</p>
+<p><a class="anchor" id="vocabulary"></a></p><dl class="section user"><dt>Vocabulary Format</dt><dd></dd></dl>
+<p>The vocabulary, or dictionary, indexes all the words found in the corpus and has the following format: </p><pre>{TABLE|VIEW} <em>vocab_table</em> (
+    <em>wordid</em> INTEGER,
+    <em>word</em> TEXT
+)</pre><p> where <code>wordid</code> refers to the word ID (the index of a word in the vocabulary) and <code>word</code> is the actual word.</p>
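+<p>As a small illustration (the table name <code>my_vocab</code> and its contents are hypothetical, chosen only to show the format): </p><pre class="example">
+CREATE TABLE my_vocab(wordid INT4, word TEXT);
+INSERT INTO my_vocab VALUES
+(0, 'corpus'), (1, 'document'), (2, 'model'), (3, 'statistical'), (4, 'topic');
+</pre>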
+<dl class="section user"><dt>Usage</dt><dd><ul>
+<li><p class="startli">The training (i.e. topic inference) can be done with the following function: </p><pre>
+        SELECT <a class="el" href="lda_8sql__in.html#aeb7593251a4dedb695494f65dc2d1f80">lda_train</a>(
+            <em>'data_table'</em>,
+            <em>'model_table'</em>,
+            <em>'output_data_table'</em>,
+            <em>voc_size</em>,
+            <em>topic_num</em>,
+            <em>iter_num</em>,
+            <em>alpha</em>,
+            <em>beta</em>)
+    </pre><p class="startli">This function stores the resulting model in <code><em>model_table</em></code>. The table has only 1 row and is in the following form: </p><pre>{TABLE} <em>model_table</em> (
+        <em>voc_size</em> INTEGER,
+        <em>topic_num</em> INTEGER,
+        <em>alpha</em> FLOAT,
+        <em>beta</em> FLOAT,
+        <em>model</em> BIGINT[])
+    </pre><p class="startli">This function also stores the topic counts and the topic assignments in each document in <code><em>output_data_table</em></code>. The table is in the following form: </p><pre>{TABLE} <em>output_data_table</em> (
+        <em>docid</em> INTEGER,
+        <em>wordcount</em> INTEGER,
+        <em>words</em> INTEGER[],
+        <em>counts</em> INTEGER[],
+        <em>topic_count</em> INTEGER[],
+        <em>topic_assignment</em> INTEGER[])
+    </pre></li>
+<li><p class="startli">The prediction (i.e. labelling of test documents using a learned LDA model) can be done with the following function: </p><pre>
+        SELECT <a class="el" href="lda_8sql__in.html#aaa89e30c8fd0ba41b6feee01ee195330">lda_predict</a>(
+            <em>'data_table'</em>,
+            <em>'model_table'</em>,
+            <em>'output_table'</em>);
+    </pre><p class="startli">This function stores the prediction results in <em>output_table</em>. Each row in the table stores the topic distribution and the topic assignments for a document in the dataset. The table is in the following form: </p><pre>{TABLE} <em>output_table</em> (
+        <em>docid</em> INTEGER,
+        <em>wordcount</em> INTEGER,
+        <em>words</em> INTEGER[],
+        <em>counts</em> INTEGER[],
+        <em>topic_count</em> INTEGER[],
+        <em>topic_assignment</em> INTEGER[])
+    </pre></li>
+<li>This module also provides a function for computing the perplexity: <pre>
+        SELECT <a class="el" href="lda_8sql__in.html#a25c3ef12d9808d8a38c5fd2630f3b5a9">lda_get_perplexity</a>(
+            <em>'model_table'</em>,
+            <em>'output_data_table'</em>);
+    </pre></li>
+</ul>
+</dd></dl>
+<dl class="section user"><dt>Implementation Notes</dt><dd>The input format requires the user to tokenize each document into an array of words. This process involves tokenizing and filtering documents - a process out-of-scope for this module. Internally, the input data will be validated and then converted to the following format for efficiency: <pre>{TABLE} <em>__internal_data_table__</em> (
+    <em>docid</em> INTEGER,
+    <em>wordcount</em> INTEGER,
+    <em>words</em> INTEGER[],
+    <em>counts</em> INTEGER[])
+</pre> where <code>docid</code> is the document ID, <code>wordcount</code> is the number of words in the document, <code>words</code> is the list of unique words in the document, and <code>counts</code> is a list of the number of occurrences of each unique word in the document.</dd></dl>
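+<p>For intuition only, the conversion from per-(docid, wordid) rows to this internal format is roughly equivalent to the following aggregation (a sketch, assuming a training table named <code>data_table</code> in the &lt;docid, wordid, count&gt; format; the module performs this conversion internally, so the user never needs to run it): </p><pre class="example">
+-- Roll each document up into arrays of unique word IDs and their counts
+SELECT docid,
+       sum(count)                        AS wordcount,
+       array_agg(wordid ORDER BY wordid) AS words,
+       array_agg(count ORDER BY wordid)  AS counts
+FROM data_table
+GROUP BY docid;
+</pre>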
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd>The LDA training function has the following syntax. <pre class="syntax">
+lda_train( data_table,
+           model_table,
+           output_data_table,
+           voc_size,
+           topic_num,
+           iter_num,
+           alpha,
+           beta
+         )
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>data_table </dt>
+<dd><p class="startdd">TEXT. The name of the table storing the training dataset. Each row is in the form <code>&lt;docid, wordid, count&gt;</code> where <code>docid</code>, <code>wordid</code>, and <code>count</code> are non-negative integers.</p>
+<p class="enddd">The <code>docid</code> column refers to the document ID, the <code>wordid</code> column is the word ID (the index of a word in the vocabulary), and <code>count</code> is the number of occurrences of the word in the document.  </p>
+</dd>
+<dt>model_table </dt>
+<dd>TEXT. The name of the table storing the learned models. This table has one row and the following columns. <table  class="output">
+<tr>
+<th>voc_size </th><td>INTEGER. Size of the vocabulary. Note that the <code>wordid</code> values should be consecutive integers from 0 to <code>voc_size</code> &minus; <code>1</code>. A data validation routine is called to validate the dataset.  </td></tr>
+<tr>
+<th>topic_num </th><td>INTEGER. Number of topics.  </td></tr>
+<tr>
+<th>alpha </th><td>DOUBLE PRECISION. Dirichlet parameter for the per-doc topic multinomial (e.g. 50/topic_num).  </td></tr>
+<tr>
+<th>beta </th><td>DOUBLE PRECISION. Dirichlet parameter for the per-topic word multinomial (e.g. 0.01).  </td></tr>
+<tr>
+<th>model </th><td>BIGINT[].  </td></tr>
+</table>
+</dd>
+<dt>output_data_table </dt>
+<dd>TEXT. The name of the table to store the output data. It has the following columns: <table  class="output">
+<tr>
+<th>docid </th><td>INTEGER.  </td></tr>
+<tr>
+<th>wordcount </th><td>INTEGER.  </td></tr>
+<tr>
+<th>words </th><td>INTEGER[].  </td></tr>
+<tr>
+<th>counts </th><td>INTEGER[].  </td></tr>
+<tr>
+<th>topic_count </th><td>INTEGER[].  </td></tr>
+<tr>
+<th>topic_assignment </th><td>INTEGER[].  </td></tr>
+</table>
+</dd>
+<dt>voc_size </dt>
+<dd>INTEGER. Size of the vocabulary. Note that the <code>wordid</code> values should be consecutive integers from 0 to <code>voc_size</code> &minus; <code>1</code>. A data validation routine is called to validate the dataset. </dd>
+<dt>topic_num </dt>
+<dd>INTEGER. Number of topics. </dd>
+<dt>iter_num </dt>
+<dd>INTEGER. Number of iterations (e.g. 60). </dd>
+<dt>alpha </dt>
+<dd>DOUBLE PRECISION. Dirichlet parameter for the per-doc topic multinomial (e.g. 50/topic_num). </dd>
+<dt>beta </dt>
+<dd>DOUBLE PRECISION. Dirichlet parameter for the per-topic word multinomial (e.g. 0.01). </dd>
+</dl>
+</dd></dl>
+<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd></dd></dl>
+<p>Prediction&mdash;labelling test documents using a learned LDA model&mdash;is accomplished with the following function: </p><pre class="syntax">
+lda_predict( data_table,
+             model_table,
+             output_table
+           );
+</pre><p>This function stores the prediction results in <code><em>output_table</em></code>. Each row in the table stores the topic distribution and the topic assignments for a document in the dataset. The table has the following columns: </p><table  class="output">
+<tr>
+<th>docid </th><td>INTEGER.  </td></tr>
+<tr>
+<th>wordcount </th><td>INTEGER.  </td></tr>
+<tr>
+<th>words </th><td>INTEGER[]. List of word IDs in this document.  </td></tr>
+<tr>
+<th>counts </th><td>INTEGER[]. List of word counts in this document.  </td></tr>
+<tr>
+<th>topic_count </th><td>INTEGER[]. Of length topic_num, list of topic counts in this document.  </td></tr>
+<tr>
+<th>topic_assignment </th><td>INTEGER[]. Of length wordcount, list of topic index for each word.  </td></tr>
+</table>
+<p><a class="anchor" id="perplexity"></a></p><dl class="section user"><dt>Perplexity Function</dt><dd>This module provides a function for computing the perplexity. <pre class="syntax">
+lda_get_perplexity( model_table,
+                    output_data_table
+                  );
+</pre></dd></dl>
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>Prepare a training dataset for LDA. The examples below are small strings extracted from various Wikipedia documents. <pre class="example">
+CREATE TABLE documents(docid INT4, contents TEXT);
+INSERT INTO documents VALUES
+(0, 'Statistical topic models are a class of Bayesian latent variable models, originally developed for analyzing the semantic content of large document corpora.'),
+(1, 'By the late 1960s, the balance between pitching and hitting had swung in favor of the pitchers. In 1968 Carl Yastrzemski won the American League batting title with an average of just .301, the lowest in history.'),
+(2, 'Machine learning is closely related to and often overlaps with computational statistics; a discipline that also specializes in prediction-making. It has strong ties to mathematical optimization, which deliver methods, theory and application domains to the field.'),
+(3, 'California''s diverse geography ranges from the Sierra Nevada in the east to the Pacific Coast in the west, from the Redwood–Douglas fir forests of the northwest, to the Mojave Desert areas in the southeast. The center of the state is dominated by the Central Valley, a major agricultural area. ')
+</pre></li>
+<li>Build a word count table by extracting the words and building a histogram for each document using the <code>term_frequency</code> function (<a class="el" href="group__grp__text__utilities.html">Term Frequency</a>). <pre class="example">
+-- Convert a string to a list of words
+ALTER TABLE documents ADD COLUMN words TEXT[];
+UPDATE documents SET words = regexp_split_to_array(lower(contents), E'[\\s+\\.\\,]');
+
+-- Create the term frequency table
+DROP TABLE IF EXISTS my_training;
+SELECT madlib.term_frequency('documents', 'docid', 'words', 'my_training', TRUE);
+SELECT * FROM my_training order by docid limit 20;
+</pre> <pre class="result">
+ docid | wordid | count
+-------+--------+-------
+     0 |     57 |     1
+     0 |     86 |     1
+     0 |      4 |     1
+     0 |     55 |     1
+     0 |     69 |     2
+     0 |     81 |     1
+     0 |     30 |     1
+     0 |     33 |     1
+     0 |     36 |     1
+     0 |     43 |     1
+     0 |     25 |     1
+     0 |     65 |     2
+     0 |     72 |     1
+     0 |      9 |     1
+     0 |      0 |     2
+     0 |     29 |     1
+     0 |     18 |     1
+     0 |     12 |     1
+     0 |     96 |     1
+     0 |     91 |     1
+(20 rows)
+</pre> <pre class="example">
+SELECT * FROM my_training_vocabulary order by wordid limit 20;
+</pre> <pre class="result">
+ wordid |     word
+--------+--------------
+      0 |
+      1 | 1960s
+      2 | 1968
+      3 | 301
+      4 | a
+      5 | agricultural
+      6 | also
+      7 | american
+      8 | an
+      9 | analyzing
+     10 | and
+     11 | application
+     12 | are
+     13 | area
+     14 | areas
+     15 | average
+     16 | balance
+     17 | batting
+     18 | bayesian
+     19 | between
+(20 rows)
+</pre></li>
+<li>Create an LDA model using the <code><a class="el" href="lda_8sql__in.html#aeb7593251a4dedb695494f65dc2d1f80" title="This UDF provides an entry for the lda training process. ">lda_train()</a></code> function. <pre class="example">
+SELECT madlib.lda_train( 'my_training',
+                         'my_model',
+                         'my_outdata',
+                         104,
+                         5,
+                         10,
+                         5,
+                         0.01
+                       );
+</pre> After a successful run of the <a class="el" href="lda_8sql__in.html#aeb7593251a4dedb695494f65dc2d1f80" title="This UDF provides an entry for the lda training process. ">lda_train()</a> function, two tables are generated: one stores the learned model and the other stores the output data.</li>
+<li>To get the detailed information about the learned model, run these commands: <pre class="example">
+-- The topic description by top-k words
+SELECT madlib.lda_get_topic_desc( 'my_model',
+                                  'my_training_vocabulary',
+                                  'my_topic_desc',
+                                  15);
+select * from my_topic_desc order by topicid, prob DESC;
+</pre> <pre class="result">
+ topicid | wordid |        prob        |       word
+---------+--------+--------------------+-------------------
+       1 |     69 |  0.181900726392252 | of
+       1 |     52 | 0.0608353510895884 | is
+       1 |     65 | 0.0608353510895884 | models
+       1 |     30 | 0.0305690072639225 | corpora
+       1 |      1 | 0.0305690072639225 | 1960s
+       1 |     57 | 0.0305690072639225 | latent
+       1 |     35 | 0.0305690072639225 | diverse
+       1 |     81 | 0.0305690072639225 | semantic
+       1 |     19 | 0.0305690072639225 | between
+       1 |     75 | 0.0305690072639225 | pitchers
+       1 |     43 | 0.0305690072639225 | for
+       1 |      6 | 0.0305690072639225 | also
+       1 |     40 | 0.0305690072639225 | favor
+       1 |     47 | 0.0305690072639225 | had
+       1 |     28 | 0.0305690072639225 | computational
+       ....
+</pre>  <pre class="example">
+-- The per-word topic counts (sorted by topic id)
+SELECT madlib.lda_get_word_topic_count( 'my_model',
+                                        'my_word_topic_count');
+</pre>  <pre class="result">
+ wordid | topic_count
+--------+--------------
+      0 | {0,17,0,0,0}
+      1 | {1,0,0,0,0}
+      2 | {0,0,0,0,1}
+      3 | {0,0,0,0,1}
+      4 | {0,0,0,0,3}
+      5 | {0,1,0,0,0}
+      6 | {1,0,0,0,0}
+      7 | {1,0,0,0,0}
+      8 | {0,0,0,1,0}
+      9 | {1,0,0,0,0}
+     10 | {0,0,0,0,3}
+     11 | {0,0,1,0,0}
+     ....
+</pre></li>
+<li>To get the topic counts and the topic assignments for each document, run the following commands: <pre class="example">
+-- The per-document topic assignments and counts:
+SELECT docid, topic_assignment, topic_count FROM my_outdata;
+</pre> <pre class="result">
+ docid |                                                topic_assignment                                                 |  topic_count
+-------+-----------------------------------------------------------------------------------------------------------------+----------------
+     1 | {1,1,1,1,1,1,2,4,1,4,4,4,1,0,2,1,0,2,2,3,4,2,1,1,4,2,4,3,0,0,2,4,4,3,3,3,3,3,0,1,0,4}                           | {6,12,7,7,10}
+     3 | {1,1,1,1,1,1,4,0,2,3,1,2,0,0,0,1,2,2,1,3,3,2,2,1,2,2,2,0,3,0,4,1,0,0,1,4,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,3} | {8,12,10,21,4}
+     0 | {1,1,4,2,1,4,4,4,1,3,1,0,0,0,0,0,0,0,0,1,1,3,0,1}                                                               | {9,8,1,2,4}
+     2 | {1,1,1,1,4,1,4,4,2,0,2,4,1,1,4,1,2,0,1,3,1,2,4,3,2,4,4,3,1,2,0,3,3,1,4,3,3,3,2,1}                               | {3,13,7,8,9}
+(4 rows)
+</pre></li>
+<li>To use a learned LDA model for prediction (that is, to label new documents), use the following command: <pre class="example">
+SELECT madlib.lda_predict( 'my_testing',
+                           'my_model',
+                           'my_pred'
+                         );
+</pre> The test table (<em>my_testing</em>) is expected to be in the same form as the training table (<em>my_training</em>) and can be created with the same process. After a successful run of the <a class="el" href="lda_8sql__in.html#af1fde06c39dd12bb9e5544997f815323" title="This UDF provides an entry for the lda prediction process. ">lda_predict()</a> function, the prediction results are generated and stored in <em>my_pred</em>. This table has the same schema as the <em>my_outdata</em> table generated by the <a class="el" href="lda_8sql__in.html#aeb7593251a4dedb695494f65dc2d1f80" title="This UDF provides an entry for the lda training process. ">lda_train()</a> function.</li>
+<li>Use the following command to compute the perplexity of the result. <pre class="example">
+SELECT madlib.lda_get_perplexity( 'my_model',
+                                  'my_pred'
+                                );
+</pre></li>
+</ol>
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] D.M. Blei, A.Y. Ng, M.I. Jordan, <em>Latent Dirichlet Allocation</em>, Journal of Machine Learning Research, vol. 3, pp. 993-1022, 2003.</p>
+<p>[2] T. Griffiths and M. Steyvers, <em>Finding scientific topics</em>, PNAS, vol. 101, pp. 5228-5235, 2004.</p>
+<p>[3] Y. Wang, H. Bai, M. Stanton, W-Y. Chen, and E.Y. Chang, <em>PLDA: Parallel Latent Dirichlet Allocation for Large-scale Applications</em>, AAIM, 2009.</p>
+<p>[4] <a href="http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation">http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation</a></p>
+<p>[5] J. Chang, Collapsed Gibbs sampling methods for topic models, R manual, 2010.</p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd>File <a class="el" href="lda_8sql__in.html" title="SQL functions for Latent Dirichlet Allocation. ">lda.sql_in</a> documenting the SQL functions. </dd></dl>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__linalg.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__linalg.html b/docs/latest/group__grp__linalg.html
new file mode 100644
index 0000000..70a87dc
--- /dev/null
+++ b/docs/latest/group__grp__linalg.html
@@ -0,0 +1,308 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Norms and Distance functions</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__linalg.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Norms and Distance functions<div class="ingroups"><a class="el" href="group__grp__datatrans.html">Data Types and Transforms</a> &raquo; <a class="el" href="group__grp__arraysmatrix.html">Arrays and Matrices</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li class="level1">
+<a href="#functions">Linear Algebra Utility Functions</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#literature">Literature</a> </li>
+<li class="level1">
+<a href="#related">Related Functions</a> </li>
+</ul>
+</div><p>The linalg module provides utility functions for basic linear algebra operations; several of them are useful when implementing new algorithms. These functions operate on vectors (1-D FLOAT8 arrays) and matrices (2-D FLOAT8 arrays). Note that other array types may need to be cast to FLOAT8[] before calling the functions, as shown in the brief example below.</p>
+<p>Refer to the <a class="el" href="linalg_8sql__in.html" title="SQL functions for linear algebra. ">linalg.sql_in</a> file for documentation on each of the utility functions.</p>
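+<p>For example, an integer array can be cast inline before calling one of the functions listed below (a minimal illustration): </p><pre class="example">
+SELECT madlib.norm2(ARRAY[3,4]::float8[]);
+</pre>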
+<p><a class="anchor" id="functions"></a></p><dl class="section user"><dt>Linear Algebra Utility Functions</dt><dd><table  class="output">
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a300300fe4b8576ba0b97b95d8dea3057" title="1-norm of a vector ">norm1()</a> </th><td><p class="starttd">1-norm of a vector, \(\|\vec{a}\|_1\).</p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a50fdfe30cc0edc6888a909dbb4b4c239" title="2-norm of a vector ">norm2()</a> </th><td><p class="starttd">2-norm of a vector, \(\|\vec{a}\|_2\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#aad193850e79c4b9d811ca9bc53e13476" title="1-norm of the difference between two vectors ">dist_norm1()</a> </th><td><p class="starttd">1-norm of the difference between two vectors, \(\|\vec{a} - \vec{b}\|_1\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#aa58e51526edea6ea98db30b6f250adb4" title="2-norm of the difference between two vectors ">dist_norm2()</a> </th><td><p class="starttd">2-norm of the difference between two vectors, \(\|\vec{a} - \vec{b}\|_2\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#ad9cc156ae57bf7c0a2fe90798259105a" title="p-norm of the difference between two vectors ">dist_pnorm()</a> </th><td><p class="starttd">Generic p-norm of the difference between two vectors, \(\|\vec{a} - \vec{b}\|_p, p &gt; 0\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a31fa9f2f5b45507c09f136464fdad1db" title="Infinity-norm of the difference between two vectors. ">dist_inf_norm()</a> </th><td><p class="starttd">Infinity-norm of the difference between two vectors, \(\|\vec{a} - \vec{b}\|_\infty\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a00a08e69f27524f2096032214e15b668" title="Squared 2-norm of the difference between two vectors. ">squared_dist_norm2()</a> </th><td><p class="starttd">Squared 2-norm of the difference between two vectors, \(\|\vec{a} - \vec{b}\|_2^2\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a1782f2ba00d9f9fab20894a576079f87" title="cosine similarity score between two vectors ">cosine_similarity()</a> </th><td><p class="starttd">Cosine score between two vectors, \(\frac{\vec{a} \cdot \vec{b}}{\|\vec{a}\|_2 \|\vec{b}\|_2}\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a8c7b9281a72ff22caf06161701b27e84" title="Angle between two vectors. ">dist_angle()</a> </th><td><p class="starttd">Angle between two vectors in an Euclidean space, \(\cos^{-1}(\frac{\vec{a} \cdot \vec{b}}{\|\vec{a}\|_2 \|\vec{b}\|_2})\). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#afa13b4c6122b99422d666dedea136c18" title="Tanimoto distance between two vectors. ">dist_tanimoto()</a> </th><td><p class="starttd">Tanimoto distance between two vectors. [1] </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#ac1397ac9f4a35b3b67c3be05b5e1a828" title="Jaccard distance between two vectors (treated as sets) ">dist_jaccard()</a> </th><td><p class="starttd">Jaccard distance between two varchar vectors treated as sets. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#af6b905fcac7746ef0ed0c36df4a1e070" title="Get an indexed row of the given matrix (2-D array) ">get_row()</a> </th><td><p class="starttd">Return the indexed row of a matrix (2-D array). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a20f34c9e661191e5225cca7bc23252c5" title="Get an indexed col of the given matrix (2-D array) ">get_col()</a> </th><td><p class="starttd">Return the indexed col of a matrix (2-D array). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a1aa37f73fb1cd8d7d106aa518dd8c0b4" title="Compute the average of vectors. ">avg()</a> </th><td><p class="starttd">Compute the average of vectors. </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a0b04663ca206f03e66aed5ea2b4cc461" title="Compute the normalized average of vectors. ">normalized_avg()</a> </th><td><p class="starttd">Compute the normalized average of vectors (unit vector in an Euclidean space). </p>
+<p class="endtd"></p>
+</td></tr>
+<tr>
+<th><a class="el" href="linalg_8sql__in.html#a9c439706f35d6cac89f151d553a5f111" title="Combine vectors to a matrix. ">matrix_agg()</a> </th><td><p class="starttd">Combine vectors to a matrix. </p>
+<p class="endtd"></p>
+</td></tr>
+</table>
+</dd></dl>
+<p><a class="anchor" id="examples"></a></p>
+<p><b>Vector Norms and Distances</b></p>
+<ol type="1">
+<li>Create a database table with two vector columns and add some data. <pre class="example">
+CREATE TABLE two_vectors(
+    id  integer,
+    a   float8[],
+    b   float8[]);
+</pre> <pre class="example">
+INSERT INTO two_vectors VALUES
+(1, '{3,4}', '{4,5}'),
+(2, '{1,1,0,-4,5,3,4,106,14}', '{1,1,0,6,-3,1,2,92,2}');
+</pre></li>
+<li>Invoke norm functions. <pre class="example">
+SELECT
+    id,
+    madlib.norm1(a),
+    madlib.norm2(a)
+FROM two_vectors;
+</pre> Result: <pre class="result">
+ id | norm1 |      norm2
+----+-------+------------------
+  1 |     7 |                5
+  2 |   138 | 107.238052947636
+(2 rows)
+</pre></li>
+<li>Invoke distance functions. <pre class="example">
+SELECT
+    id,
+    madlib.dist_norm1(a, b),
+    madlib.dist_norm2(a, b),
+    madlib.dist_pnorm(a, b, 5) AS norm5,
+    madlib.dist_inf_norm(a, b),
+    madlib.squared_dist_norm2(a, b) AS sq_dist_norm2,
+    madlib.cosine_similarity(a, b),
+    madlib.dist_angle(a, b),
+    madlib.dist_tanimoto(a, b),
+    madlib.dist_jaccard(a::text[], b::text[])
+FROM two_vectors;
+</pre> Result: <pre class="result">
+ id | dist_norm1 |    dist_norm2    |      norm5       | dist_inf_norm | sq_dist_norm2 | cosine_similarity |     dist_angle     |   dist_tanimoto    |   dist_jaccard
+----+------------+------------------+------------------+---------------+---------------+-------------------+--------------------+--------------------+-------------------
+  1 |          2 |  1.4142135623731 | 1.14869835499704 |             1 |             2 | 0.999512076087079 | 0.0312398334302684 | 0.0588235294117647 | 0.666666666666667
+  2 |         48 | 22.6274169979695 |  15.585086360695 |            14 |           512 | 0.985403348449008 |   0.17106899659286 | 0.0498733684005455 | 0.833333333333333
+(2 rows)
+</pre></li>
+</ol>
+<p><b>Matrix Functions</b></p>
+<ol type="1">
+<li>Create a database table with a matrix column. <pre class="example">
+CREATE TABLE matrix(
+    id  integer,
+    m   float8[]);
+</pre> <pre class="example">
+INSERT INTO matrix VALUES
+(1, '{{4,5},{3,5},{9,0}}');
+</pre></li>
+<li>Invoke matrix functions. <pre class="example">
+SELECT
+    madlib.get_row(m, 1) AS row_1,
+    madlib.get_row(m, 2) AS row_2,
+    madlib.get_row(m, 3) AS row_3,
+    madlib.get_col(m, 1) AS col_1,
+    madlib.get_col(m, 2) AS col_2
+FROM matrix;
+</pre> Result: <pre class="result">
+ row_1 | row_2 | row_3 |  col_1  |  col_2
+-------+-------+-------+---------+---------
+ {4,5} | {3,5} | {9,0} | {4,3,9} | {5,5,0}
+(1 row)
+</pre></li>
+</ol>
+<p><b>Aggregate Functions</b></p>
+<ol type="1">
+<li>Create a database table with a vector column. <pre class="example">
+CREATE TABLE vector(
+    id  integer,
+    v   float8[]);
+</pre> <pre class="example">
+INSERT INTO vector VALUES
+(1, '{4,3}'),
+(2, '{8,6}'),
+(3, '{12,9}');
+</pre></li>
+<li>Invoke aggregate functions. <pre class="example">
+SELECT
+    madlib.avg(v),
+    madlib.normalized_avg(v),
+    madlib.matrix_agg(v)
+FROM vector;
+</pre> Result: <pre class="result">
+  avg  | normalized_avg |      matrix_agg
+-------+----------------+----------------------
+ {8,6} | {0.8,0.6}      | {{4,3},{8,6},{12,9}}
+(1 row)
+</pre></li>
+</ol>
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] <a href="http://en.wikipedia.org/wiki/Jaccard_index#Tanimoto_similarity_and_distance">http://en.wikipedia.org/wiki/Jaccard_index#Tanimoto_similarity_and_distance</a></p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd>File <a class="el" href="linalg_8sql__in.html" title="SQL functions for linear algebra. ">linalg.sql_in</a> documenting the SQL functions. </dd></dl>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__linear__solver.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__linear__solver.html b/docs/latest/group__grp__linear__solver.html
new file mode 100644
index 0000000..9fb412d
--- /dev/null
+++ b/docs/latest/group__grp__linear__solver.html
@@ -0,0 +1,143 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Linear Solvers</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__linear__solver.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="summary">
+<a href="#groups">Modules</a>  </div>
+  <div class="headertitle">
+<div class="title">Linear Solvers<div class="ingroups"><a class="el" href="group__grp__utility__functions.html">Utility Functions</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
+<p>A collection of methods that implement solutions for systems of consistent linear equations. </p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="groups"></a>
+Modules</h2></td></tr>
+<tr class="memitem:group__grp__dense__linear__solver"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__dense__linear__solver.html">Dense Linear Systems</a></td></tr>
+<tr class="memdesc:group__grp__dense__linear__solver"><td class="mdescLeft">&#160;</td><td class="mdescRight">Implements solution methods for large dense linear systems. Currently, restricted to problems that fit in memory. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:group__grp__sparse__linear__solver"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__grp__sparse__linear__solver.html">Sparse Linear Systems</a></td></tr>
+<tr class="memdesc:group__grp__sparse__linear__solver"><td class="mdescLeft">&#160;</td><td class="mdescRight">Implements solution methods for linear systems with sparse matrix input. Currently, restricted to problems that fit in memory. <br /></td></tr>
+<tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr>
+</table>
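+<p>As a brief orientation (a sketch only; the table and column names are hypothetical, and the exact signature of <code>madlib.linear_solver_dense()</code> and its input format should be confirmed on the Dense Linear Systems page), solving a small dense system \( Ax = b \) looks roughly like this: </p><pre class="example">
+-- One row per equation: row index, the corresponding row of A, and the entry of b
+CREATE TABLE linear_systems_data(
+    id  INTEGER NOT NULL,
+    lhs DOUBLE PRECISION[],
+    rhs DOUBLE PRECISION);
+INSERT INTO linear_systems_data VALUES
+(0, ARRAY[1, 0, 0], 20),
+(1, ARRAY[0, 1, 0], 15),
+(2, ARRAY[0, 0, 1], 20);
+SELECT madlib.linear_solver_dense( 'linear_systems_data',
+                                   'result_table',
+                                   'id',
+                                   'lhs',
+                                   'rhs');
+</pre>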
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__linear__solver.js
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__linear__solver.js b/docs/latest/group__grp__linear__solver.js
new file mode 100644
index 0000000..01508a7
--- /dev/null
+++ b/docs/latest/group__grp__linear__solver.js
@@ -0,0 +1,5 @@
+var group__grp__linear__solver =
+[
+    [ "Dense Linear Systems", "group__grp__dense__linear__solver.html", null ],
+    [ "Sparse Linear Systems", "group__grp__sparse__linear__solver.html", null ]
+];
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__linreg.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__linreg.html b/docs/latest/group__grp__linreg.html
new file mode 100644
index 0000000..67012a8
--- /dev/null
+++ b/docs/latest/group__grp__linreg.html
@@ -0,0 +1,388 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Linear Regression</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__linreg.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Linear Regression<div class="ingroups"><a class="el" href="group__grp__super.html">Supervised Learning</a> &raquo; <a class="el" href="group__grp__regml.html">Regression Models</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li class="level1">
+<a href="#train">Training Function</a> </li>
+<li class="level1">
+<a href="#predict">Prediction Function</a> </li>
+<li class="level1">
+<a href="#examples">Examples</a> </li>
+<li class="level1">
+<a href="#background">Technical Background</a> </li>
+<li class="level1">
+<a href="#literature">Literature</a> </li>
+<li class="level1">
+<a href="#related">Related Topics</a> </li>
+</ul>
+</div><p>Linear regression models a linear relationship between a scalar dependent variable \( y \) and one or more explanatory independent variables \( x \), producing a model of coefficients.</p>
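+<p>Concretely, in standard ordinary-least-squares notation (a summary, not notation specific to this module), the fitted model has the form \( y \approx c_1 x_1 + c_2 x_2 + \dots + c_k x_k \), and training chooses the coefficient vector \( (c_1, \dots, c_k) \) that minimizes the sum of squared residuals over the training rows; an intercept term is typically obtained by including a constant 1 among the independent variables.</p>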
+<p><a class="anchor" id="train"></a></p><dl class="section user"><dt>Training Function</dt><dd></dd></dl>
+<p>The linear regression training function has the following syntax. </p><pre class="syntax">
+linregr_train( source_table,
+               out_table,
+               dependent_varname,
+               independent_varname,
+               grouping_cols,
+               heteroskedasticity_option
+             )
+</pre><p><b>Arguments</b> </p><dl class="arglist">
+<dt>source_table </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the training data.</p>
+<p class="enddd"></p>
+</dd>
+<dt>out_table </dt>
+<dd><p class="startdd">TEXT. Name of the generated table containing the output model.</p>
+<p>The output table contains the following columns. </p><table  class="output">
+<tr>
+<th>&lt;...&gt; </th><td>Any grouping columns provided during training. Present only if the grouping option is used.  </td></tr>
+<tr>
+<th>coef </th><td>FLOAT8[]. Vector of the coefficients of the regression.  </td></tr>
+<tr>
+<th>r2 </th><td>FLOAT8. R-squared coefficient of determination of the model.  </td></tr>
+<tr>
+<th>std_err </th><td>FLOAT8[]. Vector of the standard error of the coefficients.  </td></tr>
+<tr>
+<th>t_stats </th><td>FLOAT8[]. Vector of the t-statistics of the coefficients.  </td></tr>
+<tr>
+<th>p_values </th><td>FLOAT8[]. Vector of the p-values of the coefficients.  </td></tr>
+<tr>
+<th>condition_no </th><td>FLOAT8. The condition number of the \(X^{*}X\) matrix. A high condition number is usually an indication that there may be some numeric instability in the result, yielding a less reliable model. A high condition number often results when there is a significant amount of collinearity in the underlying design matrix, in which case other regression techniques, such as elastic net regression, may be more appropriate.  </td></tr>
+<tr>
+<th>bp_stats </th><td>FLOAT8. The Breusch-Pagan statistic of heteroskedasticity. Present only if the heteroskedasticity_option argument was set to TRUE when the model was trained.  </td></tr>
+<tr>
+<th>bp_p_value </th><td>FLOAT8. The Breusch-Pagan p-value. Present only if the heteroskedasticity_option parameter was set to TRUE when the model was trained.  </td></tr>
+<tr>
+<th>num_rows_processed </th><td>INTEGER. The number of rows that are actually used in each group.  </td></tr>
+<tr>
+<th>num_missing_rows_skipped </th><td>INTEGER. The number of rows skipped in the computation for each group because they contain NULL values in the dependent or independent variables. </td></tr>
+</table>
+<p class="enddd">A summary table named &lt;out_table&gt;_summary is created together with the output table. It has the following columns: </p><table  class="output">
+<tr>
+<th>source_table </th><td>The data source table name </td></tr>
+<tr>
+<th>out_table </th><td>The output table name </td></tr>
+<tr>
+<th>dependent_varname </th><td>The dependent variable </td></tr>
+<tr>
+<th>independent_varname </th><td>The independent variables </td></tr>
+<tr>
+<th>num_rows_processed </th><td>The total number of rows that were used in the computation. </td></tr>
+<tr>
+<th>num_missing_rows_skipped </th><td>The total number of rows that were skipped because of NULL values in them. </td></tr>
+</table>
+</dd>
+<dt></dt>
+<dd><dl class="section note"><dt>Note</dt><dd>For p-values, we just return the computation result directly. Other statistical packages, like 'R', produce the same result, but on printing the result to screen, another format function is used and any p-value that is smaller than the machine epsilon (the smallest positive floating-point number 'x' such that '1 + x != 1') will be printed on screen as "&lt; xxx" (xxx is the value of the machine epsilon). Although the result may look different, they are in fact the same. </dd></dl>
+</dd>
+<dt>dependent_varname </dt>
+<dd><p class="startdd">TEXT. Expression to evaluate for the dependent variable.</p>
+<p class="enddd"></p>
+</dd>
+<dt>independent_varname </dt>
+<dd><p class="startdd">TEXT. Expression list to evaluate for the independent variables. An intercept variable is not assumed. It is common to provide an explicit intercept term by including a single constant <code>1</code> term in the independent variable list.</p>
+<p class="enddd"></p>
+</dd>
+<dt>grouping_cols (optional) </dt>
+<dd><p class="startdd">TEXT, default: NULL. An expression list used to group the input dataset into discrete groups, running one regression per group. Similar to the SQL <code>GROUP BY</code> clause. When this value is null, no grouping is used and a single result model is generated.</p>
+<p class="enddd"></p>
+</dd>
+<dt>heteroskedasticity_option (optional) </dt>
+<dd>BOOLEAN, default: FALSE. When TRUE, the heteroskedasticity of the model is also calculated and returned with the results (see the sketch following this list). </dd>
+</dl>
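+<p>For example, the Breusch-Pagan statistics can be requested at training time by passing TRUE for this argument. The following is a minimal sketch that reuses the <code>houses</code> data set created in the Examples section below; the output table name <code>houses_linregr_hsk</code> is illustrative. </p><pre class="example">
+DROP TABLE IF EXISTS houses_linregr_hsk;
+SELECT madlib.linregr_train( 'houses',
+                             'houses_linregr_hsk',
+                             'price',
+                             'ARRAY[1, tax, bath, size]',
+                             NULL,
+                             TRUE
+                           );
+SELECT bp_stats, bp_p_value FROM houses_linregr_hsk;
+</pre>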
+<p><a class="anchor" id="warning"></a></p><dl class="section warning"><dt>Warning</dt><dd>The aggregate 'linregr' has been deprecated in favor of the function 'linregr_train'. If the aggregate 'linregr' is used to output the results of linear regression to a table, it is recommended to follow the general pattern shown below (replace text within '&lt;...&gt;' with the appropriate variable names). <pre class="syntax">
+CREATE TABLE &lt;output table&gt; AS
+SELECT (r).*
+FROM (
+    SELECT linregr(&lt;dependent variable&gt;, &lt;independent variable&gt;) as r
+    FROM &lt;source table&gt;
+    ) q;
+</pre></dd></dl>
+<p><a class="anchor" id="predict"></a></p><dl class="section user"><dt>Prediction Function</dt><dd><pre class="syntax">
+linregr_predict(coef, col_ind)
+</pre> <b>Arguments</b> <dl class="arglist">
+<dt>coef </dt>
+<dd><p class="startdd">FLOAT8[]. Vector of the coefficients of regression.</p>
+<p class="enddd"></p>
+</dd>
+<dt>col_ind </dt>
+<dd><p class="startdd">FLOAT8[]. An array containing the independent variable column names. </p>
+<p class="enddd"><a class="anchor" id="examples"></a></p>
+</dd>
+</dl>
+</dd></dl>
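+<p>For instance, with a model table produced by <code>linregr_train()</code>, such as the <code>houses_linregr</code> table built in the Examples section below, a single prediction might look like the following sketch; the feature values are taken from row 2 of the example data. </p><pre class="example">
+SELECT madlib.linregr_predict( coef,
+                               ARRAY[1, 1050, 2, 1410]::float8[]
+                             ) AS predicted_price
+FROM houses_linregr;
+</pre>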
+<dl class="section user"><dt>Examples</dt><dd><ol type="1">
+<li>Create an input data set. <pre class="example">
+CREATE TABLE houses (id INT, tax INT, bedroom INT, bath FLOAT, price INT,
+            size INT, lot INT);
+COPY houses FROM STDIN WITH DELIMITER '|';
+  1 |  590 |       2 |    1 |  50000 |  770 | 22100
+  2 | 1050 |       3 |    2 |  85000 | 1410 | 12000
+  3 |   20 |       3 |    1 |  22500 | 1060 |  3500
+  4 |  870 |       2 |    2 |  90000 | 1300 | 17500
+  5 | 1320 |       3 |    2 | 133000 | 1500 | 30000
+  6 | 1350 |       2 |    1 |  90500 |  820 | 25700
+  7 | 2790 |       3 |  2.5 | 260000 | 2130 | 25000
+  8 |  680 |       2 |    1 | 142500 | 1170 | 22000
+  9 | 1840 |       3 |    2 | 160000 | 1500 | 19000
+ 10 | 3680 |       4 |    2 | 240000 | 2790 | 20000
+ 11 | 1660 |       3 |    1 |  87000 | 1030 | 17500
+ 12 | 1620 |       3 |    2 | 118600 | 1250 | 20000
+ 13 | 3100 |       3 |    2 | 140000 | 1760 | 38000
+ 14 | 2070 |       2 |    3 | 148000 | 1550 | 14000
+ 15 |  650 |       3 |  1.5 |  65000 | 1450 | 12000
+\.
+</pre></li>
+<li>Train a regression model. First, a single regression for all the data. <pre class="example">
+SELECT madlib.linregr_train( 'houses',
+                             'houses_linregr',
+                             'price',
+                             'ARRAY[1, tax, bath, size]'
+                           );
+</pre></li>
+<li>Generate three output models, one for each value of "bedroom". <pre class="example">
+SELECT madlib.linregr_train( 'houses',
+                             'houses_linregr_bedroom',
+                             'price',
+                             'ARRAY[1, tax, bath, size]',
+                             'bedroom'
+                           );
+</pre></li>
+<li>Examine the resulting models. <pre class="example">
+-- Set extended display on for easier reading of output
+\x ON
+SELECT * FROM houses_linregr;
+</pre> Result: <pre class="result">
+-[ RECORD 1 ]+---------------------------------------------------------------------------
+coef         | {-12849.4168959872,28.9613922651765,10181.6290712648,50.516894915354}
+r2           | 0.768577580597443
+std_err      | {33453.0344331391,15.8992104963997,19437.7710925923,32.928023174087}
+t_stats      | {-0.38410317968819,1.82156166004184,0.523806408809133,1.53416118083605}
+p_values     | {0.708223134615422,0.0958005827189772,0.610804093526536,0.153235085548186}
+condition_no | 9002.50457085737
+</pre></li>
+<li>View the results grouped by bedroom. <pre class="example">
+SELECT * FROM houses_linregr_bedroom;
+</pre> Result: <pre class="result">
+-[ RECORD 1 ]+--------------------------------------------------------------------------
+bedroom      | 2
+coef         | {-84242.0345406597,55.4430144648696,-78966.9753675319,225.611910021192}
+r2           | 0.968809546465313
+std_err      | {35018.9991665742,19.5731125320686,23036.8071292552,49.0448678148784}
+t_stats      | {-2.40560942761235,2.83261103077151,-3.42786111480046,4.60011251070697}
+p_values     | {0.250804617665239,0.21605133377602,0.180704400437373,0.136272031474122}
+condition_no | 10086.1048721726
+-[ RECORD 2 ]+--------------------------------------------------------------------------
+bedroom      | 4
+coef         | {0.0112536020318378,41.4132554771633,0.0225072040636757,31.3975496688276}
+r2           | 1
+std_err      | {0,0,0,0}
+t_stats      | {Infinity,Infinity,Infinity,Infinity}
+p_values     |
+condition_no | Infinity
+-[ RECORD 3 ]+--------------------------------------------------------------------------
+bedroom      | 3
+coef         | {-88155.8292501601,27.1966436294429,41404.0293363612,62.637521075324}
+r2           | 0.841699901311252
+std_err      | {57867.9999702625,17.8272309154689,43643.1321511114,70.8506824863954}
+t_stats      | {-1.52339512849005,1.52556747362508,0.948695185143966,0.884077878676067}
+p_values     | {0.188161432894871,0.187636685729869,0.386340032374927,0.417132778705789}
+condition_no | 11722.6225642147
+</pre> Alternatively you can unnest the results for easier reading of output. <pre class="example">
+\x OFF
+SELECT unnest(ARRAY['intercept','tax','bath','size']) as attribute,
+       unnest(coef) as coefficient,
+       unnest(std_err) as standard_error,
+       unnest(t_stats) as t_stat,
+       unnest(p_values) as pvalue
+FROM houses_linregr;
+</pre></li>
+<li>Use the prediction function to evaluate residuals. (A sketch after this list aggregates them into an overall error measure.) <pre class="example">
+SELECT houses.*,
+       madlib.linregr_predict( m.coef,
+                               ARRAY[1,tax,bath,size]
+                             ) as predict,
+       price -
+          madlib.linregr_predict( m.coef,
+                                  ARRAY[1,tax,bath,size]
+                                ) as residual
+FROM houses, houses_linregr m;
+</pre></li>
+</ol>
+</dd></dl>
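+<p>As a follow-up to the last step, the residuals can be aggregated into a single error measure. This is a sketch rather than part of the module itself; it computes the root-mean-square error of the fitted model over the training data. </p><pre class="example">
+SELECT sqrt( avg( power( price - madlib.linregr_predict( m.coef,
+                                                          ARRAY[1,tax,bath,size] ),
+                         2 ) ) ) AS rmse
+FROM houses, houses_linregr m;
+</pre>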
+<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Note</dt><dd>All table names can be optionally schema qualified (current_schemas() would be searched if a schema name is not provided) and all table and column names should follow case-sensitivity and quoting rules per the database. (For instance, 'mytable' and 'MyTable' both resolve to the same entity, i.e. 'mytable'. If mixed-case or multi-byte characters are desired for entity names then the string should be double-quoted; in this case the input would be '"MyTable"').</dd></dl>
+<p><a class="anchor" id="background"></a></p><dl class="section user"><dt>Technical Background</dt><dd></dd></dl>
+<p>Ordinary least-squares (OLS) linear regression refers to a stochastic model in which the conditional mean of the dependent variable (usually denoted \( Y \)) is an affine function of the vector of independent variables (usually denoted \( \boldsymbol x \)). That is, </p><p class="formulaDsp">
+\[ E[Y \mid \boldsymbol x] = \boldsymbol c^T \boldsymbol x \]
+</p>
+<p> for some unknown vector of coefficients \( \boldsymbol c \). The assumption is that the residuals are i.i.d. distributed Gaussians. That is, the (conditional) probability density of \( Y \) is given by </p><p class="formulaDsp">
+\[ f(y \mid \boldsymbol x) = \frac{1}{\sqrt{2 \pi \sigma^2}} \cdot \exp\left(-\frac{1}{2 \sigma^2} \cdot (y - \boldsymbol x^T \boldsymbol c)^2 \right) \,. \]
+</p>
+<p> OLS linear regression finds the vector of coefficients \( \boldsymbol c \) that maximizes the likelihood of the observations.</p>
+<p>Let</p><ul>
+<li>\( \boldsymbol y \in \mathbf R^n \) denote the vector of the \( n \) observed values of the dependent variable,</li>
+<li>\( X \in \mathbf R^{n \times k} \) denote the design matrix with \( n \) rows and \( k \) columns, containing the observed vectors of independent variables \( \boldsymbol x_i \) as rows,</li>
+<li>\( X^T \) denote the transpose of \( X \),</li>
+<li>\( X^+ \) denote the pseudo-inverse of \( X \).</li>
+</ul>
+<p>Maximizing the likelihood is equivalent to maximizing the log-likelihood \( \sum_{i=1}^n \log f(y_i \mid \boldsymbol x_i) \), which simplifies to minimizing the <b>residual sum of squares</b> \( RSS \) (also called sum of squared residuals or sum of squared errors of prediction), </p><p class="formulaDsp">
+\[ RSS = \sum_{i=1}^n ( y_i - \boldsymbol c^T \boldsymbol x_i )^2 = (\boldsymbol y - X \boldsymbol c)^T (\boldsymbol y - X \boldsymbol c) \,. \]
+</p>
+<p> The first-order conditions yield that the \( RSS \) is minimized at </p><p class="formulaDsp">
+\[ \boldsymbol c = (X^T X)^+ X^T \boldsymbol y \,. \]
+</p>
+<p>Computing the <b>total sum of squares</b> \( TSS \), the <b>explained sum of squares</b> \( ESS \) (also called the regression sum of squares), and the <b>coefficient of determination</b> \( R^2 \) is done according to the following formulas: </p><p class="formulaDsp">
+\begin{align*} ESS &amp; = \boldsymbol y^T X \boldsymbol c - \frac{ \| y \|_1^2 }{n} \\ TSS &amp; = \sum_{i=1}^n y_i^2 - \frac{ \| y \|_1^2 }{n} \\ R^2 &amp; = \frac{ESS}{TSS} \end{align*}
+</p>
+<p> Note: The last equality follows from the definition \( R^2 = 1 - \frac{RSS}{TSS} \) and the fact that for linear regression \( TSS = RSS + ESS \). A proof of the latter can be found, e.g., at: <a href="http://en.wikipedia.org/wiki/Sum_of_squares">http://en.wikipedia.org/wiki/Sum_of_squares</a></p>
+<p>We estimate the variance \( Var[Y - \boldsymbol c^T \boldsymbol x \mid \boldsymbol x] \) as </p><p class="formulaDsp">
+\[ \sigma^2 = \frac{RSS}{n - k} \]
+</p>
+<p> and compute the t-statistic for coefficient \( i \) as </p><p class="formulaDsp">
+\[ t_i = \frac{c_i}{\sqrt{\sigma^2 \cdot \left( (X^T X)^{-1} \right)_{ii} }} \,. \]
+</p>
+<p>The \( p \)-value for coefficient \( i \) gives the probability of seeing a value at least as extreme as the one observed, provided that the null hypothesis ( \( c_i = 0 \)) is true. Letting \( F_\nu \) denote the cumulative density function of student-t with \( \nu \) degrees of freedom, the \( p \)-value for coefficient \( i \) is therefore </p><p class="formulaDsp">
+\[ p_i = \Pr(|T| \geq |t_i|) = 2 \cdot (1 - F_{n - k}( |t_i| )) \]
+</p>
+<p> where \( T \) is a student-t distributed random variable with mean 0.</p>
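+<p>As a concrete instance of this formula: in the first training example above, the <code>houses</code> data set has \( n = 15 \) rows and \( k = 4 \) independent-variable terms (the constant intercept, <code>tax</code>, <code>bath</code> and <code>size</code>), so each reported p-value is evaluated with \( n - k = 11 \) degrees of freedom, </p><p class="formulaDsp">
+\[ p_i = 2 \cdot \left( 1 - F_{11}( |t_i| ) \right) \,. \]
+</p>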
+<p>The condition number [2] \( \kappa(X) = \|X\|_2\cdot\|X^{-1}\|_2\) is computed as the product of two spectral norms [3]. The spectral norm of a matrix \(X\) is the largest singular value of \(X\) i.e. the square root of the largest eigenvalue of the positive-semidefinite matrix \(X^{*}X\):</p>
+<p class="formulaDsp">
+\[ \|X\|_2 = \sqrt{\lambda_{\max}\left(X^{*}X\right)}\ , \]
+</p>
+<p> where \(X^{*}\) is the conjugate transpose of \(X\). The condition number of a linear regression problem is a worst-case measure of how sensitive the result is to small perturbations of the input. A large condition number (say, more than 1000) indicates the presence of significant multicollinearity.</p>
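+<p>Equivalently, when \(X\) has full column rank (interpreting \(X^{-1}\) as the pseudo-inverse \(X^+\) for non-square \(X\)), the condition number is the ratio of the largest to the smallest singular value of \(X\): </p><p class="formulaDsp">
+\[ \kappa(X) = \frac{\sigma_{\max}(X)}{\sigma_{\min}(X)} \,. \]
+</p>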
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] Cosma Shalizi: Statistics 36-350: Data Mining, Lecture Notes, 21 October 2009, <a href="http://www.stat.cmu.edu/~cshalizi/350/lectures/17/lecture-17.pdf">http://www.stat.cmu.edu/~cshalizi/350/lectures/17/lecture-17.pdf</a></p>
+<p>[2] Wikipedia: Condition Number, <a href="http://en.wikipedia.org/wiki/Condition_number">http://en.wikipedia.org/wiki/Condition_number</a>.</p>
+<p>[3] Wikipedia: Spectral Norm, <a href="http://en.wikipedia.org/wiki/Spectral_norm#Spectral_norm">http://en.wikipedia.org/wiki/Spectral_norm#Spectral_norm</a></p>
+<p>[4] Wikipedia: Breusch–Pagan test, <a href="http://en.wikipedia.org/wiki/Breusch%E2%80%93Pagan_test">http://en.wikipedia.org/wiki/Breusch%E2%80%93Pagan_test</a></p>
+<p>[5] Wikipedia: Heteroscedasticity-consistent standard errors, <a href="http://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors">http://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors</a></p>
+<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
+<p><a class="el" href="group__grp__robust.html">Robust Variance</a></p>
+<p><a class="el" href="group__grp__clustered__errors.html">Clustered Variance</a></p>
+<p><a class="el" href="group__grp__validation.html">Cross Validation</a></p>
+<p>File <a class="el" href="linear_8sql__in.html" title="SQL functions for linear regression. ">linear.sql_in</a>, source file for the SQL functions</p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-madlib-site/blob/7258f51a/docs/latest/group__grp__lmf.html
----------------------------------------------------------------------
diff --git a/docs/latest/group__grp__lmf.html b/docs/latest/group__grp__lmf.html
new file mode 100644
index 0000000..9d896b3
--- /dev/null
+++ b/docs/latest/group__grp__lmf.html
@@ -0,0 +1,278 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.10"/>
+<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
+<title>MADlib: Low-rank Matrix Factorization</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<script type="text/javascript">
+  $(document).ready(initResizable);
+  $(window).load(resizeHeight);
+</script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/javascript">
+  $(document).ready(function() { init_search(); });
+</script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script><script src="../mathjax/MathJax.js"></script>
+<!-- hack in the navigation tree -->
+<script type="text/javascript" src="navtree_hack.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
+<!-- google analytics -->
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-45382226-1', 'auto');
+  ga('send', 'pageview');
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td id="projectlogo"><a href="http://madlib.incubator.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">
+   <span id="projectnumber">1.8</span>
+   </div>
+   <div id="projectbrief">User Documentation for MADlib</div>
+  </td>
+   <td>        <div id="MSearchBox" class="MSearchBoxInactive">
+        <span class="left">
+          <img id="MSearchSelect" src="search/mag_sel.png"
+               onmouseover="return searchBox.OnSearchSelectShow()"
+               onmouseout="return searchBox.OnSearchSelectHide()"
+               alt=""/>
+          <input type="text" id="MSearchField" value="Search" accesskey="S"
+               onfocus="searchBox.OnSearchFieldFocus(true)" 
+               onblur="searchBox.OnSearchFieldFocus(false)" 
+               onkeyup="searchBox.OnSearchFieldChange(event)"/>
+          </span><span class="right">
+            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
+          </span>
+        </div>
+</td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.10 -->
+<script type="text/javascript">
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+</script>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+$(document).ready(function(){initNavTree('group__grp__lmf.html','');});
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">Low-rank Matrix Factorization<div class="ingroups"><a class="el" href="group__grp__datatrans.html">Data Types and Transforms</a> &raquo; <a class="el" href="group__grp__arraysmatrix.html">Arrays and Matrices</a> &raquo; <a class="el" href="group__grp__matrix__factorization.html">Matrix Factorization</a></div></div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="toc"><b>Contents</b> </p><ul>
+<li>
+<a href="#syntax">Function Syntax</a> </li>
+<li>
+<a href="#examples">Examples</a> </li>
+<li>
+<a href="#literature">Literature</a> </li>
+</ul>
+</div><p>This module implements "factor model" for representing an incomplete matrix using a low-rank approximation [1]. Mathematically, this model seeks to find matrices U and V (also referred as factors) that, for any given incomplete matrix A, minimizes:</p>
+<p class="formulaDsp">
+\[ \|\boldsymbol A - \boldsymbol UV^{T} \|_2 \]
+</p>
+<p>subject to \(rank(\boldsymbol UV^{T}) \leq r\), where \(\|\cdot\|_2\) denotes the Frobenius norm. If \(A\) is an \(m \times n\) matrix, then \(U\) is \(m \times r\) and \(V\) is \(n \times r\) in dimension, with \(1 \leq r \ll \min(m, n)\). This model is not intended to compute a full decomposition, or to be used as part of an inverse procedure. It has been widely used in recommendation systems (e.g., Netflix [2]) and feature selection (e.g., image processing [3]).</p>
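+<p>Once the factors have been computed, any entry of the matrix, observed or missing, is approximated by the dot product of row \(i\) of \(U\) and row \(j\) of \(V\): </p><p class="formulaDsp">
+\[ A_{ij} \approx \sum_{k=1}^{r} U_{ik} V_{jk} = \boldsymbol u_i \cdot \boldsymbol v_j \,, \]
+</p>
+<p> which is what the <code>array_dot</code> call in the Examples section below evaluates for a missing entry.</p>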
+<p><a class="anchor" id="syntax"></a></p><dl class="section user"><dt>Function Syntax</dt><dd></dd></dl>
+<p>Low-rank matrix factorization of an incomplete matrix into two factors.</p>
+<pre class="syntax">
+lmf_igd_run( rel_output,
+             rel_source,
+             col_row,
+             col_column,
+             col_value,
+             row_dim,
+             column_dim,
+             max_rank,
+             stepsize,
+             scale_factor,
+             num_iterations,
+             tolerance
+           )
+</pre><p> <b>Arguments</b> </p><dl class="arglist">
+<dt>rel_output </dt>
+<dd><p class="startdd">TEXT. The name of the table to receive the output.</p>
+<p>The output factor matrices U and V are stored in a flattened format. </p><pre>RESULT AS (
+        matrix_u    DOUBLE PRECISION[],
+        matrix_v    DOUBLE PRECISION[],
+        rmse        DOUBLE PRECISION
+);</pre><p class="enddd">Features correspond to row i is <code>matrix_u[i:i][1:r]</code>. Features correspond to column j is <code>matrix_v[j:j][1:r]</code>.  </p>
+</dd>
+<dt>rel_source </dt>
+<dd><p class="startdd">TEXT. The name of the table containing the input data.</p>
+<p>The input matrix is expected to be of the following form: </p><pre>{TABLE|VIEW} <em>input_table</em> (
+    <em>row</em>    INTEGER,
+    <em>col</em>    INTEGER,
+    <em>value</em>  DOUBLE PRECISION
+)</pre><p class="enddd">Input is contained in a table that describes an incomplete matrix, with available entries specified as (row, column, value). The input matrix should be 1-based, which means row &gt;= 1, and col &gt;= 1. NULL values are not expected.  </p>
+</dd>
+<dt>col_row </dt>
+<dd>TEXT. The name of the column containing the row number. </dd>
+<dt>col_column </dt>
+<dd>TEXT. The name of the column containing the column number. </dd>
+<dt>col_value </dt>
+<dd>DOUBLE PRECISION. The value at (row, col). </dd>
+<dt>row_dim (optional) </dt>
+<dd>INTEGER, default: "SELECT max(col_row) FROM rel_source". The number of columns in the matrix. </dd>
+<dt>column_dim (optional) </dt>
+<dd>INTEGER, default: "SELECT max(col_col) FROM rel_source". The number of rows in the matrix. </dd>
+<dt>max_rank </dt>
+<dd>INTEGER, default: 20. The rank of the desired approximation. </dd>
+<dt>stepsize (optional) </dt>
+<dd>DOUBLE PRECISION, default: 0.01. Hyper-parameter that decides how aggressive the gradient steps are.  </dd>
+<dt>scale_factor (optional) </dt>
+<dd>DOUBLE PRECISION, default: 0.1. Hyper-parameter that decides the scale of the initial factors. </dd>
+<dt>num_iterations (optional) </dt>
+<dd>INTEGER, default: 10. Maximum number of iterations to perform regardless of convergence. </dd>
+<dt>tolerance (optional) </dt>
+<dd>DOUBLE PRECISION, default: 0.0001. Acceptable level of error in convergence. </dd>
+</dl>
+<p><a class="anchor" id="examples"></a></p><dl class="section user"><dt>Examples</dt><dd></dd></dl>
+<ol type="1">
+<li>Prepare an input table/view: <pre class="example">
+DROP TABLE IF EXISTS lmf_data;
+CREATE TABLE lmf_data (
+ row INT,
+ col INT,
+ val FLOAT8
+);
+</pre></li>
+<li>Populate the input table with some data. <pre class="example">
+INSERT INTO lmf_data VALUES (1, 1, 5.0);
+INSERT INTO lmf_data VALUES (3, 100, 1.0);
+INSERT INTO lmf_data VALUES (999, 10000, 2.0);
+</pre></li>
+<li>Call the <a class="el" href="lmf_8sql__in.html#ac1acb1f0e1f7008118f21c83546a4602" title="Low-rank matrix factorization of a incomplete matrix into two factors. ">lmf_igd_run()</a> stored procedure. <pre class="example">
+DROP TABLE IF EXISTS lmf_model;
+SELECT madlib.lmf_igd_run( 'lmf_model',
+                           'lmf_data',
+                           'row',
+                           'col',
+                           'val',
+                           999,
+                           10000,
+                           3,
+                           0.1,
+                           2,
+                           10,
+                           1e-9
+                         );
+</pre> Example result (the exact result may not be the same). <pre class="result">
+NOTICE:
+Finished low-rank matrix factorization using incremental gradient
+DETAIL:
+   table : lmf_data (row, col, val)
+Results:
+   RMSE = 0.0145966345300041
+Output:
+   view : SELECT * FROM lmf_model WHERE id = 1
+ lmf_igd_run
+&#160;-----------
+           1
+ (1 row)
+</pre></li>
+<li>Sanity-check the result. You need the model id that is returned (and indicated in the NOTICE) by the function <a class="el" href="lmf_8sql__in.html#ac1acb1f0e1f7008118f21c83546a4602" title="Low-rank matrix factorization of a incomplete matrix into two factors. ">lmf_igd_run()</a>; it is assumed to be 1 here: <pre class="example">
+SELECT array_dims(matrix_u) AS u_dims, array_dims(matrix_v) AS v_dims
+FROM lmf_model
+WHERE id = 1;
+</pre> Result: <pre class="result">
+     u_dims    |     v_dims
+ --------------+----------------
+  [1:999][1:3] | [1:10000][1:3]
+ (1 row)
+</pre></li>
+<li>Query the result value. <pre class="example">
+SELECT matrix_u[2:2][1:3] AS row_2_features
+FROM lmf_model
+WHERE id = 1;
+</pre> Example output (the exact result may not be the same): <pre class="result">
+                       row_2_features
+&#160;---------------------------------------------------------
+  {{1.12030523084104,0.522217971272767,0.0264869043603539}}
+ (1 row)
+</pre></li>
+<li>Make a prediction for a missing entry (row=2, col=7654). (A further sanity check appears after this list.) <pre class="example">
+SELECT madlib.array_dot(
+    matrix_u[2:2][1:3],
+    matrix_v[7654:7654][1:3]
+    ) AS row_2_col_7654
+FROM lmf_model
+WHERE id = 1;
+</pre> Example output (the exact result may not be the same due to the randomness of the algorithm): <pre class="result">
+   row_2_col_7654
+&#160;------------------
+  1.3201582940851
+ (1 row)
+</pre></li>
+</ol>
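+<p>As a further check, the RMSE reported in the NOTICE can be approximately recovered from the stored factors. The following is a sketch that assumes model id 1 and rank 3, as in the example above. </p><pre class="example">
+SELECT sqrt( avg( power( d.val - madlib.array_dot( m.matrix_u[d.row:d.row][1:3],
+                                                   m.matrix_v[d.col:d.col][1:3] ),
+                         2 ) ) ) AS rmse
+FROM lmf_data d, lmf_model m
+WHERE m.id = 1;
+</pre>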
+<p><a class="anchor" id="literature"></a></p><dl class="section user"><dt>Literature</dt><dd></dd></dl>
+<p>[1] N. Srebro and T. Jaakkola. “Weighted Low-Rank Approximations.” In: ICML. Ed. by T. Fawcett and N. Mishra. AAAI Press, 2003, pp. 720–727. isbn: 1-57735-189-4.</p>
+<p>[2] Simon Funk, Netflix Update: Try This at Home, December 11 2006, <a href="http://sifter.org/~simon/journal/20061211.html">http://sifter.org/~simon/journal/20061211.html</a></p>
+<p>[3] J. Wright, A. Ganesh, S. Rao, Y. Peng, and Y. Ma. “Robust Principal Component Analysis: Exact Recovery of Corrupted Low-Rank Matrices via Convex Optimization.” In: NIPS. Ed. by Y. Bengio, D. Schuurmans, J. D. Lafferty, C. K. I. Williams, and A. Culotta. Curran Associates, Inc., 2009, pp. 2080–2088. isbn: 9781615679119. </p>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="footer">Generated on Mon Jul 27 2015 20:37:45 for MADlib by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.10 </li>
+  </ul>
+</div>
+</body>
+</html>