Posted to commits@spark.apache.org by yh...@apache.org on 2016/12/29 15:48:23 UTC

[5/5] spark-website git commit: Update Spark website for the release of Apache Spark 2.1.0

Update Spark website for the release of Apache Spark 2.1.0


Project: http://git-wip-us.apache.org/repos/asf/spark-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark-website/commit/e10180e6
Tree: http://git-wip-us.apache.org/repos/asf/spark-website/tree/e10180e6
Diff: http://git-wip-us.apache.org/repos/asf/spark-website/diff/e10180e6

Branch: refs/heads/asf-site
Commit: e10180e6784e1d9d8771ef42481687aec0a423a2
Parents: d2bcf18
Author: Yin Huai <yh...@databricks.com>
Authored: Wed Dec 28 18:00:05 2016 -0800
Committer: Yin Huai <yh...@databricks.com>
Committed: Thu Dec 29 07:46:11 2016 -0800

----------------------------------------------------------------------
 _layouts/global.html                            |   2 +-
 documentation.md                                |   1 +
 downloads.md                                    |   6 +-
 js/downloads.js                                 |   1 +
 news/_posts/2016-12-28-spark-2-1-0-released.md  |  14 +
 .../_posts/2016-12-28-spark-release-2-1-0.md    | 120 ++++++
 site/committers.html                            |  48 ++-
 site/community.html                             |  16 +-
 site/contributing.html                          |  28 +-
 site/developer-tools.html                       |  22 +-
 site/docs/latest                                |   2 +-
 site/documentation.html                         |  14 +-
 site/downloads.html                             |  14 +-
 site/examples.html                              | 100 ++---
 site/faq.html                                   |   8 +-
 site/graphx/index.html                          |   8 +-
 site/index.html                                 |   8 +-
 site/js/downloads.js                            |   1 +
 site/mailing-lists.html                         |   8 +-
 site/mllib/index.html                           |   8 +-
 site/news/amp-camp-2013-registration-ope.html   |   8 +-
 .../news/announcing-the-first-spark-summit.html |   8 +-
 .../news/fourth-spark-screencast-published.html |   8 +-
 site/news/index.html                            |  27 +-
 site/news/nsdi-paper.html                       |   8 +-
 site/news/one-month-to-spark-summit-2015.html   |   8 +-
 .../proposals-open-for-spark-summit-east.html   |   8 +-
 ...registration-open-for-spark-summit-east.html |   8 +-
 .../news/run-spark-and-shark-on-amazon-emr.html |   8 +-
 site/news/spark-0-6-1-and-0-5-2-released.html   |   8 +-
 site/news/spark-0-6-2-released.html             |   8 +-
 site/news/spark-0-7-0-released.html             |   8 +-
 site/news/spark-0-7-2-released.html             |   8 +-
 site/news/spark-0-7-3-released.html             |   8 +-
 site/news/spark-0-8-0-released.html             |   8 +-
 site/news/spark-0-8-1-released.html             |   8 +-
 site/news/spark-0-9-0-released.html             |   8 +-
 site/news/spark-0-9-1-released.html             |  10 +-
 site/news/spark-0-9-2-released.html             |  10 +-
 site/news/spark-1-0-0-released.html             |   8 +-
 site/news/spark-1-0-1-released.html             |   8 +-
 site/news/spark-1-0-2-released.html             |   8 +-
 site/news/spark-1-1-0-released.html             |  10 +-
 site/news/spark-1-1-1-released.html             |   8 +-
 site/news/spark-1-2-0-released.html             |   8 +-
 site/news/spark-1-2-1-released.html             |   8 +-
 site/news/spark-1-2-2-released.html             |  10 +-
 site/news/spark-1-3-0-released.html             |   8 +-
 site/news/spark-1-4-0-released.html             |   8 +-
 site/news/spark-1-4-1-released.html             |   8 +-
 site/news/spark-1-5-0-released.html             |   8 +-
 site/news/spark-1-5-1-released.html             |   8 +-
 site/news/spark-1-5-2-released.html             |   8 +-
 site/news/spark-1-6-0-released.html             |   8 +-
 site/news/spark-1-6-1-released.html             |   8 +-
 site/news/spark-1-6-2-released.html             |   8 +-
 site/news/spark-1-6-3-released.html             |   8 +-
 site/news/spark-2-0-0-released.html             |   8 +-
 site/news/spark-2-0-1-released.html             |   8 +-
 site/news/spark-2-0-2-released.html             |   8 +-
 site/news/spark-2-1-0-released.html             | 220 +++++++++++
 site/news/spark-2.0.0-preview.html              |   8 +-
 .../spark-accepted-into-apache-incubator.html   |   8 +-
 site/news/spark-and-shark-in-the-news.html      |  10 +-
 site/news/spark-becomes-tlp.html                |   8 +-
 site/news/spark-featured-in-wired.html          |   8 +-
 .../spark-mailing-lists-moving-to-apache.html   |   8 +-
 site/news/spark-meetups.html                    |   8 +-
 site/news/spark-screencasts-published.html      |   8 +-
 site/news/spark-summit-2013-is-a-wrap.html      |   8 +-
 site/news/spark-summit-2014-videos-posted.html  |   8 +-
 site/news/spark-summit-2015-videos-posted.html  |   8 +-
 site/news/spark-summit-agenda-posted.html       |   8 +-
 .../spark-summit-east-2015-videos-posted.html   |  10 +-
 .../spark-summit-east-2016-cfp-closing.html     |   8 +-
 site/news/spark-summit-east-agenda-posted.html  |   8 +-
 .../news/spark-summit-europe-agenda-posted.html |   8 +-
 site/news/spark-summit-europe.html              |   8 +-
 .../spark-summit-june-2016-agenda-posted.html   |   8 +-
 site/news/spark-tips-from-quantifind.html       |   8 +-
 .../spark-user-survey-and-powered-by-page.html  |   8 +-
 site/news/spark-version-0-6-0-released.html     |   8 +-
 .../spark-wins-cloudsort-100tb-benchmark.html   |   8 +-
 ...-wins-daytona-gray-sort-100tb-benchmark.html |   8 +-
 .../strata-exercises-now-available-online.html  |   8 +-
 .../news/submit-talks-to-spark-summit-2014.html |   8 +-
 .../news/submit-talks-to-spark-summit-2016.html |   8 +-
 .../submit-talks-to-spark-summit-east-2016.html |   8 +-
 .../submit-talks-to-spark-summit-eu-2016.html   |   8 +-
 site/news/two-weeks-to-spark-summit-2014.html   |   8 +-
 ...deo-from-first-spark-development-meetup.html |   8 +-
 site/powered-by.html                            |   8 +-
 site/release-process.html                       |  10 +-
 site/releases/spark-release-0-3.html            |   8 +-
 site/releases/spark-release-0-5-0.html          |   8 +-
 site/releases/spark-release-0-5-1.html          |   8 +-
 site/releases/spark-release-0-5-2.html          |   8 +-
 site/releases/spark-release-0-6-0.html          |   8 +-
 site/releases/spark-release-0-6-1.html          |   8 +-
 site/releases/spark-release-0-6-2.html          |   8 +-
 site/releases/spark-release-0-7-0.html          |   8 +-
 site/releases/spark-release-0-7-2.html          |   8 +-
 site/releases/spark-release-0-7-3.html          |   8 +-
 site/releases/spark-release-0-8-0.html          |  12 +-
 site/releases/spark-release-0-8-1.html          |   8 +-
 site/releases/spark-release-0-9-0.html          |   8 +-
 site/releases/spark-release-0-9-1.html          |  28 +-
 site/releases/spark-release-0-9-2.html          |   8 +-
 site/releases/spark-release-1-0-0.html          |   8 +-
 site/releases/spark-release-1-0-1.html          |  16 +-
 site/releases/spark-release-1-0-2.html          |  10 +-
 site/releases/spark-release-1-1-0.html          |  14 +-
 site/releases/spark-release-1-1-1.html          |   8 +-
 site/releases/spark-release-1-2-0.html          |  10 +-
 site/releases/spark-release-1-2-1.html          |   8 +-
 site/releases/spark-release-1-2-2.html          |   8 +-
 site/releases/spark-release-1-3-0.html          |  14 +-
 site/releases/spark-release-1-3-1.html          |  14 +-
 site/releases/spark-release-1-4-0.html          |  12 +-
 site/releases/spark-release-1-4-1.html          |   8 +-
 site/releases/spark-release-1-5-0.html          |  38 +-
 site/releases/spark-release-1-5-1.html          |   8 +-
 site/releases/spark-release-1-5-2.html          |   8 +-
 site/releases/spark-release-1-6-0.html          |  28 +-
 site/releases/spark-release-1-6-1.html          |   8 +-
 site/releases/spark-release-1-6-2.html          |   8 +-
 site/releases/spark-release-1-6-3.html          |   8 +-
 site/releases/spark-release-2-0-0.html          |  44 +--
 site/releases/spark-release-2-0-1.html          |   8 +-
 site/releases/spark-release-2-0-2.html          |   8 +-
 site/releases/spark-release-2-1-0.html          | 370 +++++++++++++++++++
 site/research.html                              |   8 +-
 site/screencasts/1-first-steps-with-spark.html  |   8 +-
 .../2-spark-documentation-overview.html         |   8 +-
 .../3-transformations-and-caching.html          |   8 +-
 .../4-a-standalone-job-in-spark.html            |   8 +-
 site/screencasts/index.html                     |   8 +-
 site/sitemap.xml                                |  20 +-
 site/sql/index.html                             |   8 +-
 site/streaming/index.html                       |   8 +-
 site/third-party-projects.html                  |  10 +-
 site/trademarks.html                            |   8 +-
 site/versioning-policy.html                     |  10 +-
 143 files changed, 1458 insertions(+), 722 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/_layouts/global.html
----------------------------------------------------------------------
diff --git a/_layouts/global.html b/_layouts/global.html
index 0fbfe5a..099454f 100644
--- a/_layouts/global.html
+++ b/_layouts/global.html
@@ -121,7 +121,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="{{site.baseurl}}/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="{{site.baseurl}}/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="{{site.baseurl}}/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="{{site.baseurl}}/faq.html">Frequently Asked Questions</a></li>
         </ul>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/documentation.md
----------------------------------------------------------------------
diff --git a/documentation.md b/documentation.md
index 6234eac..6d7afdd 100644
--- a/documentation.md
+++ b/documentation.md
@@ -12,6 +12,7 @@ navigation:
 <p>Setup instructions, programming guides, and other documentation are available for each stable version of Spark below:</p>
 
 <ul>
+  <li><a href="{{site.baseurl}}/docs/2.1.0/">Spark 2.1.0</a></li>
   <li><a href="{{site.baseurl}}/docs/2.0.2/">Spark 2.0.2</a></li>
   <li><a href="{{site.baseurl}}/docs/2.0.1/">Spark 2.0.1</a></li>
   <li><a href="{{site.baseurl}}/docs/2.0.0/">Spark 2.0.0</a></li>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/downloads.md
----------------------------------------------------------------------
diff --git a/downloads.md b/downloads.md
index ddf9ba6..cfbc6e4 100644
--- a/downloads.md
+++ b/downloads.md
@@ -51,7 +51,7 @@ Spark artifacts are [hosted in Maven Central](http://search.maven.org/#search%7C
 
     groupId: org.apache.spark
     artifactId: spark-core_2.11
-    version: 2.0.2
+    version: 2.1.0
 
 ### Spark Source Code Management
 If you are interested in working with the newest under-development code or contributing to Apache Spark development, you can also check out the master branch from Git:
@@ -59,8 +59,8 @@ If you are interested in working with the newest under-development code or contr
     # Master development branch
     git clone git://github.com/apache/spark.git
 
-    # 2.0 maintenance branch with stability fixes on top of Spark 2.0.2
-    git clone git://github.com/apache/spark.git -b branch-2.0
+    # 2.1 maintenance branch with stability fixes on top of Spark 2.1.0
+    git clone git://github.com/apache/spark.git -b branch-2.1
 
 Once you've downloaded Spark, you can find instructions for installing and building it on the <a href="{{site.baseurl}}/documentation.html">documentation page</a>.
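
If you build with sbt rather than Maven, the same coordinate bump would look roughly like this (a sketch; the exact Scala patch version is an assumption):

    // build.sbt -- pulling the 2.1.0 artifacts from Maven Central
    scalaVersion := "2.11.8"   // assumption: any Scala 2.11.x matches spark-core_2.11

    libraryDependencies += "org.apache.spark" %% "spark-core" % "2.1.0"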
 

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/js/downloads.js
----------------------------------------------------------------------
diff --git a/js/downloads.js b/js/downloads.js
index a7a5482..36a04c7 100644
--- a/js/downloads.js
+++ b/js/downloads.js
@@ -36,6 +36,7 @@ var packagesV7 = [hadoop2p7, hadoop2p6, hadoop2p4, hadoop2p3, hadoopFree, source
 
 // addRelease("2.0.0-preview", new Date("05/24/2016"), sources.concat(packagesV7), true, false);
 
+addRelease("2.1.0", new Date("12/28/2016"), packagesV7, true);
 addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true);
 addRelease("2.0.1", new Date("10/03/2016"), packagesV7, true);
 addRelease("2.0.0", new Date("07/26/2016"), packagesV7, true);

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/news/_posts/2016-12-28-spark-2-1-0-released.md
----------------------------------------------------------------------
diff --git a/news/_posts/2016-12-28-spark-2-1-0-released.md b/news/_posts/2016-12-28-spark-2-1-0-released.md
new file mode 100644
index 0000000..4bd4ab1
--- /dev/null
+++ b/news/_posts/2016-12-28-spark-2-1-0-released.md
@@ -0,0 +1,14 @@
+---
+layout: post
+title: Spark 2.1.0 released
+categories:
+- News
+tags: []
+status: publish
+type: post
+published: true
+meta:
+  _edit_last: '4'
+  _wpas_done_all: '1'
+---
+We are happy to announce the availability of <a href="{{site.baseurl}}/releases/spark-release-2-1-0.html" title="Spark Release 2.1.0">Spark 2.1.0</a>! Visit the <a href="{{site.baseurl}}/releases/spark-release-2-1-0.html" title="Spark Release 2.1.0">release notes</a> to read about the new features, or <a href="{{site.baseurl}}/downloads.html">download</a> the release today.

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/releases/_posts/2016-12-28-spark-release-2-1-0.md
----------------------------------------------------------------------
diff --git a/releases/_posts/2016-12-28-spark-release-2-1-0.md b/releases/_posts/2016-12-28-spark-release-2-1-0.md
new file mode 100644
index 0000000..7a8f664
--- /dev/null
+++ b/releases/_posts/2016-12-28-spark-release-2-1-0.md
@@ -0,0 +1,120 @@
+---
+layout: post
+title: Spark Release 2.1.0
+categories: []
+tags: []
+status: publish
+type: post
+published: true
+meta:
+  _edit_last: '4'
+  _wpas_done_all: '1'
+---
+
+
+Apache Spark 2.1.0 is the second release on the 2.x line. This release makes significant strides in the production readiness of Structured Streaming, with added support for <a href="{{site.baseurl}}/docs/2.1.0/structured-streaming-programming-guide.html#handling-late-data-and-watermarking">event time watermarks</a> and <a href="{{site.baseurl}}/docs/2.1.0/structured-streaming-kafka-integration.html">Kafka 0.10 support</a>. In addition, this release focuses more on usability, stability, and polish, resolving over 1200 tickets.
+
+
+To download Apache Spark 2.1.0, visit the <a href="{{site.baseurl}}/downloads.html">downloads</a> page. You can consult JIRA for the [detailed changes](https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12315420&version=12335644). We have curated a list of high level changes here, grouped by major modules.
+
+
+* This will become a table of contents (this text will be scraped).
+{:toc}
+
+
+### Core and Spark SQL
+
+ - **API updates**
+   - SPARK-17864: Data type APIs are stable APIs. 
+   - SPARK-18351: from_json and to_json for parsing JSON for string columns
+   - SPARK-16700: When creating a DataFrame in PySpark, Python dictionaries can be used as values of a StructType.
+ - **Performance and stability**
+   - SPARK-17861: Scalable Partition Handling. Hive metastore stores all table partition metadata by default for Spark tables stored with Hive’s storage formats as well as tables stored with Spark’s native formats. This change reduces first query latency over partitioned tables and allows for the use of DDL commands to manipulate partitions for tables stored with Spark’s native formats. <a href="{{site.baseurl}}/docs/2.1.0/sql-programming-guide.html#upgrading-from-spark-sql-20-to-21">Users can migrate tables stored with Spark's native formats created by previous versions by using the MSCK command</a>.
+   - SPARK-16523: Speeds up group-by aggregate performance by adding a fast aggregation cache that is backed by a row-based hashmap.
+ - **Other notable changes**
+   - SPARK-9876: parquet-mr upgraded to 1.8.1
+
+*Programming guides: <a href="{{site.baseurl}}/docs/2.1.0/programming-guide.html">Spark Programming Guide</a> and <a href="{{site.baseurl}}/docs/2.1.0/sql-programming-guide.html">Spark SQL, DataFrames and Datasets Guide</a>.*
+
+
+### Structured Streaming
+
+ - **API updates**
+   - SPARK-17346: <a href="{{site.baseurl}}/docs/2.1.0/structured-streaming-kafka-integration.html">Kafka 0.10 support in Structured Streaming</a>
+   - SPARK-17731: Metrics for Structured Streaming
+   - SPARK-17829: Stable format for offset log
+   - SPARK-18124: Observed delay based Event Time Watermarks
+   - SPARK-18192: Support all file formats in structured streaming
+   - SPARK-18516: Separate instantaneous state from progress performance statistics
+ - **Stability**
+   - SPARK-17267: Long running structured streaming requirements
+
+*Programming guide: <a href="{{site.baseurl}}/docs/2.1.0/structured-streaming-programming-guide.html">Structured Streaming Programming Guide</a>.*
+
+
+### MLlib
+
+ - **API updates**
+   - SPARK-5992: Locality Sensitive Hashing
+   - SPARK-7159: Multiclass Logistic Regression in DataFrame-based API
+   - SPARK-16000: ML persistence: Make model loading backwards-compatible with Spark 1.x with saved models using spark.mllib.linalg.Vector columns in DataFrame-based API
+ - **Performance and stability**
+   - SPARK-17748: Faster, more stable LinearRegression for < 4096 features
+   - SPARK-16719: RandomForest: communicate fewer trees on each iteration
+
+*Programming guide: <a href="{{site.baseurl}}/docs/2.1.0/ml-guide.html">Machine Learning Library (MLlib) Guide</a>.*
+
+
+### SparkR
+
+The main focus of SparkR in the 2.1.0 release was adding extensive support for ML algorithms, which include:
+
+ - New ML algorithms in SparkR including LDA, Gaussian Mixture Models, ALS, Random Forest, Gradient Boosted Trees, and more
+ - Support for multinomial logistic regression providing similar functionality as the glmnet R package
+ - Enable installing third party packages on workers using `spark.addFile` (<a href="https://issues.apache.org/jira/browse/SPARK-17577">SPARK-17577</a>).
+ - Standalone installable package built with the Apache Spark release. We will be submitting this to CRAN soon.
+
+*Programming guide: <a href="{{site.baseurl}}/docs/2.1.0/sparkr.html">SparkR (R on Spark)</a>.*
+
+### GraphX
+
+ - SPARK-11496: Personalized pagerank
+
+*Programming guide: <a href="{{site.baseurl}}/docs/2.1.0/graphx-programming-guide.html">GraphX Programming Guide</a>.*
+
+
+### Deprecations
+
+ - **MLlib**
+   - SPARK-18592: Deprecate unnecessary Param setter methods in tree and ensemble models
+
+
+### Changes of behavior
+
+
+ - **Core and SQL**
+   - SPARK-18360: The default table path of tables in the default database will be under the location of the default database instead of always depending on the warehouse location setting.
+   - SPARK-18377: spark.sql.warehouse.dir is a static configuration now. Users need to set it before the start of the first SparkSession and its value is shared by sessions in the same application.
+   - SPARK-14393: Values generated by non-deterministic functions will not change after coalesce or union.
+   - SPARK-18076: Fix default Locale used in DateFormat, NumberFormat to Locale.US
+   - SPARK-16216: CSV and JSON data sources write timestamp and date values in <a href="https://www.w3.org/TR/NOTE-datetime">ISO 8601 formatted string</a>. Two options, timestampFormat and dateFormat, are added to these two data sources to let users control the format of timestamp and date value in string representation, respectively. Please refer to the API doc of <a href="{{site.baseurl}}/docs/2.1.0/api/scala/index.html#org.apache.spark.sql.DataFrameReader">DataFrameReader</a> and <a href="{{site.baseurl}}/docs/2.1.0/api/scala/index.html#org.apache.spark.sql.DataFrameWriter">DataFrameWriter</a> for more details about these two configurations. 
+   - SPARK-17427: Function SIZE returns -1 when its input parameter is null.
+   - SPARK-16498: LazyBinaryColumnarSerDe is fixed as the SerDe for RCFile.
+   - SPARK-16552: If a user does not specify the schema for a table and relies on schema inference, the inferred schema will be stored in the metastore. The schema will not be inferred again when this table is used.
+ - **Structured Streaming**
+   - SPARK-18516: Separate instantaneous state from progress performance statistics
+ - **MLlib**
+   - SPARK-17870: ChiSquareSelector now accounts for degrees of freedom by using pValue rather than raw statistic to select the top features.
+
+
+
+### Known Issues
+
+ - SPARK-17647: In SQL LIKE clause, wildcard characters '%' and '_' right after backslashes are always escaped.
+ - SPARK-18908: If a StreamExecution fails to start, users need to check stderr for the error.
+
+
+
+### Credits
+Last but not least, this release would not have been possible without the following contributors:
+ALeksander Eskilson, Aaditya Ramesh, Adam Roberts, Adrian Petrescu, Ahmed Mahran, Alex Bozarth, Alexander Shorin, Alexander Ulanov, Andrew Duffy, Andrew Mills, Andrew Ray, Angus Gerry, Anthony Truchet, Anton Okolnychyi, Artur Sukhenko, Bartek Wisniewski, Bijay Pathak, Bill Chambers, Bjarne Fruergaard, Brian Cho, Bryan Cutler, Burak Yavuz, Cen Yu Hai, Charles Allen, Cheng Lian, Chie Hayashida, Christian Kadner, Clark Fitzgerald, Cody Koeninger, Daniel Darabos, Daoyuan Wang, David Navas, Davies Liu, Denny Lee, Devaraj K, Dhruve Ashar, Dilip Biswal, Ding Ding, Dmitriy Sokolov, Dongjoon Hyun, Drew Robb, Ekasit Kijsipongse, Eren Avsarogullari, Ergin Seyfe, Eric Liang, Erik O'Shaughnessy, Eyal Farago, Felix Cheung, Ferdinand Xu, Fred Reiss, Fu Xing, Gabriel Huang, Gaetan Semet, Gang Wu, Gayathri Murali, Gu Huiqin Alice, Guoqiang Li, Gurvinder Singh, Hao Ren, Herman Van Hovell, Hiroshi Inoue, Holden Karau, Hossein Falaki, Huang Zhaowei, Huaxin Gao, Hyukjin Kwon, Imran Rashid, Jacek Laskowski,
 Jagadeesan A S, Jakob Odersky, Jason White, Jeff Zhang, Jianfei Wang, Jiang Xingbo, Jie Huang, Jie Xiong, Jisoo Kim, John Muller, Jose Hiram Soltren, Joseph K. Bradley, Josh Rosen, Jun Kim, Junyang Qian, Justin Pihony, Kapil Singh, Kay Ousterhout, Kazuaki Ishizaki, Kevin Grealish, Kevin McHale, Kishor Patil, Koert Kuipers, Kousuke Saruta, Krishna Kalyan, Liang Ke, Liang-Chi Hsieh, Lianhui Wang, Linbo Jin, Liwei Lin, Luciano Resende, Maciej Brynski, Maciej Szymkiewicz, Mahmoud Rawas, Manoj Kumar, Marcelo Vanzin, Mariusz Strzelecki, Mark Grover, Maxime Rihouey, Miao Wang, Michael Allman, Michael Armbrust, Michael Gummelt, Michal Senkyr, Michal Wesolowski, Mikael Staldal, Mike Ihbe, Mitesh Patel, Nan Zhu, Nattavut Sutyanyong, Nic Eggert, Nicholas Chammas, Nick Lavers, Nick Pentreath, Nicolas Fraison, Noritaka Sekiyama, Peng Meng, Peng, Meng, Pete Robbins, Peter Ableda, Peter Lee, Philipp Hoffmann, Prashant Sharma, Prince J Wesley, Priyanka Garg, Qian Huang, Qifan Pu, Rajesh Balamohan,
 Reynold Xin, Robert Kruszewski, Russell Spitzer, Ryan Blue, Saisai Shao, Sameer Agarwal, Sami Jaktholm, Sandeep Purohit, Sandeep Singh, Satendra Kumar, Sean Owen, Sean Zhong, Seth Hendrickson, Sharkd Tu, Shen Hong, Shivansh Srivastava, Shivaram Venkataraman, Shixiong Zhu, Shuai Lin, Shubham Chopra, Sital Kedia, Song Jun, Srinath Shankar, Stavros Kontopoulos, Stefan Schulze, Steve Loughran, Suman Somasundar, Sun Dapeng, Sun Rui, Sunitha Kambhampati, Suresh Thalamati, Susan X. Huynh, Sylvain Zimmer, Takeshi YAMAMURO, Takuya UESHIN, Tao LI, Tao Lin, Tao Wang, Tarun Kumar, Tathagata Das, Tejas Patil, Thomas Graves, Timothy Chen, Timothy Hunter, Tom Graves, Tom Magrino, Tommy YU, Tyson Condie, Uncle Gen, Vinayak Joshi, Vincent Xie, Wang Fei, Wang Lei, Wang Tao, Wayne Zhang, Weichen Xu, Weiluo (David) Ren, Weiqing Yang, Wenchen Fan, Wesley Tang, William Benton, Wojciech Szymanski, Xiangrui Meng, Xianyang Liu, Xiao Li, Xin Ren, Xin Wu, Xing SHI, Xusen Yin, Yadong Qi, Yanbo Liang, Yang Wang,
 Yangyang Liu, Yin Huai, Yu Peng, Yucai Yu, Yuhao Yang, Yuming Wang, Yun Ni, Yves Raimond, Zhan Zhang, Zheng RuiFeng, Zhenhua Wang, pkch, tone-zhang, yimuxi
\ No newline at end of file
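
To make the Core and Spark SQL items in the release notes above concrete, here is a rough sketch against the Spark 2.1.0 Scala API; the table, column, and path names are invented for illustration:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.{from_json, struct, to_json}
    import org.apache.spark.sql.types.{LongType, StringType, StructType}

    // SPARK-18377: spark.sql.warehouse.dir is a static conf in 2.1, so set it
    // before the first SparkSession is created (the path is an assumption).
    val spark = SparkSession.builder()
      .appName("release-notes-sketch")
      .config("spark.sql.warehouse.dir", "/tmp/spark-warehouse")
      .getOrCreate()
    import spark.implicits._

    // SPARK-18351: parse a JSON string column into a struct, and render it back.
    val schema = new StructType().add("id", LongType).add("name", StringType)
    val parsed = spark.table("events")        // assumes a table with a string column "payload"
      .select(from_json($"payload", schema).as("data"))
    val rendered = parsed.select(to_json(struct($"data.id", $"data.name")))

    // SPARK-17861: register partitions of a native-format table created before 2.1.
    spark.sql("MSCK REPAIR TABLE my_partitioned_table")

    // SPARK-16216: pick the string format for dates written by the JSON source.
    parsed.select($"data.*").write.option("dateFormat", "yyyy-MM-dd").json("/tmp/json-out")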

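Similarly, a minimal sketch of the new Structured Streaming pieces (the Kafka 0.10 source plus an event-time watermark), reusing the SparkSession from the sketch above; the spark-sql-kafka-0-10_2.11 artifact on the classpath, the broker address, and the topic name are all assumptions:

    import org.apache.spark.sql.functions.window
    import spark.implicits._

    // SPARK-17346: Kafka 0.10 as a Structured Streaming source.
    val lines = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")   // assumption
      .option("subscribe", "events")                         // assumption
      .load()
      .selectExpr("CAST(value AS STRING) AS word",
                  "CAST(timestamp AS TIMESTAMP) AS eventTime")

    // SPARK-18124: an event-time watermark lets the engine drop state for
    // records arriving more than 10 minutes late.
    val counts = lines
      .withWatermark("eventTime", "10 minutes")
      .groupBy(window($"eventTime", "5 minutes"), $"word")
      .count()

    val query = counts.writeStream
      .outputMode("append")    // append mode works with watermarked aggregations in 2.1
      .format("console")
      .start()
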
http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/committers.html
----------------------------------------------------------------------
diff --git a/site/committers.html b/site/committers.html
index 5069db0..12076f7 100644
--- a/site/committers.html
+++ b/site/committers.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -484,30 +484,24 @@ follow-up can be well communicated to all Spark developers.
 (great!), but you risk introducing new or even worse bugs in maintenance releases (bad!). 
 The decision point is when you have a bug fix and it&#8217;s not clear whether it is worth backporting.</p>
 
-<p>I think the following facets are important to consider:</p>
-<ul>
-  <li>Backports are an extremely valuable service to the community and should be considered for 
-any bug fix.</li>
-  <li>Introducing a new bug in a maintenance release must be avoided at all costs. It over time would 
-erode confidence in our release process.</li>
-  <li>Distributions or advanced users can always backport risky patches on their own, if they see fit.</li>
-</ul>
-
-<p>For me, the consequence of these is that we should backport in the following situations:</p>
-<ul>
-  <li>Both the bug and the fix are well understood and isolated. Code being modified is well tested.</li>
-  <li>The bug being addressed is high priority to the community.</li>
-  <li>The backported fix does not vary widely from the master branch fix.</li>
-</ul>
-
-<p>We tend to avoid backports in the converse situations:</p>
-<ul>
-  <li>The bug or fix are not well understood. For instance, it relates to interactions between complex 
+<p>I think the following facets are important to consider:
+- Backports are an extremely valuable service to the community and should be considered for 
+any bug fix.
+- Introducing a new bug in a maintenance release must be avoided at all costs. It over time would 
+erode confidence in our release process.
+- Distributions or advanced users can always backport risky patches on their own, if they see fit.</p>
+
+<p>For me, the consequence of these is that we should backport in the following situations:
+- Both the bug and the fix are well understood and isolated. Code being modified is well tested.
+- The bug being addressed is high priority to the community.
+- The backported fix does not vary widely from the master branch fix.</p>
+
+<p>We tend to avoid backports in the converse situations:
+- The bug or fix are not well understood. For instance, it relates to interactions between complex 
 components or third party libraries (e.g. Hadoop libraries). The code is not well tested outside 
-of the immediate bug being fixed.</li>
-  <li>The bug is not clearly a high priority for the community.</li>
-  <li>The backported fix is widely different from the master branch fix.</li>
-</ul>
+of the immediate bug being fixed.
+- The bug is not clearly a high priority for the community.
+- The backported fix is widely different from the master branch fix.</p>
 
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/community.html
----------------------------------------------------------------------
diff --git a/site/community.html b/site/community.html
index ea360f5..7a38701 100644
--- a/site/community.html
+++ b/site/community.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -212,14 +212,14 @@ as it is an active forum for Spark users&#8217; questions and answers.</p>
 <a href="http://stackoverflow.com/questions/tagged/apache-spark"><code>apache-spark</code></a> tag to see if 
 your question has already been answered</li>
       <li>Search the nabble archive for
-<a href="http://apache-spark-user-list.1001560.n3.nabble.com/">users@spark.apache.org</a></li>
+<a href="http://apache-spark-user-list.1001560.n3.nabble.com/">users@spark.apache.org</a> </li>
     </ul>
   </li>
-  <li>Please follow the StackOverflow <a href="http://stackoverflow.com/help/how-to-ask">code of conduct</a></li>
+  <li>Please follow the StackOverflow <a href="http://stackoverflow.com/help/how-to-ask">code of conduct</a>  </li>
   <li>Always use the <code>apache-spark</code> tag when asking questions</li>
   <li>Please also use a secondary tag to specify components so subject matter experts can more easily find them.
  Examples include: <code>pyspark</code>, <code>spark-dataframe</code>, <code>spark-streaming</code>, <code>spark-r</code>, <code>spark-mllib</code>, 
-<code>spark-ml</code>, <code>spark-graphx</code>, <code>spark-graphframes</code>, <code>spark-tensorframes</code>, etc.</li>
+<code>spark-ml</code>, <code>spark-graphx</code>, <code>spark-graphframes</code>, <code>spark-tensorframes</code>, etc. </li>
   <li>Please do not cross-post between StackOverflow and the mailing lists</li>
   <li>No jobs, sales, or solicitation is permitted on StackOverflow</li>
 </ul>
@@ -253,7 +253,7 @@ project, and scenarios, it is recommended you use the user@spark.apache.org mail
       <li>Search StackOverflow at <a href="http://stackoverflow.com/questions/tagged/apache-spark"><code>apache-spark</code></a> 
 to see if your question has already been answered</li>
       <li>Search the nabble archive for
-<a href="http://apache-spark-user-list.1001560.n3.nabble.com/">users@spark.apache.org</a></li>
+<a href="http://apache-spark-user-list.1001560.n3.nabble.com/">users@spark.apache.org</a> </li>
     </ul>
   </li>
   <li>Tagging the subject line of your email will help you get a faster response, e.g. 

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/contributing.html
----------------------------------------------------------------------
diff --git a/site/contributing.html b/site/contributing.html
index 899c0c0..9c1c7b5 100644
--- a/site/contributing.html
+++ b/site/contributing.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -243,7 +243,7 @@ edit the Markdown source files in Spark&#8217;s
 <a href="https://github.com/apache/spark/tree/master/docs"><code>docs/</code></a> directory, 
 whose <code>README</code> file shows how to build the documentation locally to test your changes.
 The process to propose a doc change is otherwise the same as the process for proposing code 
-changes below.</p>
+changes below. </p>
 
 <p>To propose a change to the rest of the documentation (that is, docs that do <em>not</em> appear under 
 <a href="https://spark.apache.org/docs/">https://spark.apache.org/docs/</a>), similarly, edit the Markdown in the 
@@ -346,7 +346,7 @@ or similar search tools.
 Often, the problem has been discussed before, with a resolution that doesn&#8217;t require a code 
 change, or recording what kinds of changes will not be accepted as a resolution.</li>
   <li>Search JIRA for existing issues: 
-<a href="https://issues.apache.org/jira/browse/SPARK">https://issues.apache.org/jira/browse/SPARK</a></li>
+<a href="https://issues.apache.org/jira/browse/SPARK">https://issues.apache.org/jira/browse/SPARK</a> </li>
   <li>Type <code>spark [search terms]</code> at the top right search box. If a logically similar issue already 
 exists, then contribute to the discussion on the existing JIRA and pull request first, instead of 
 creating a new one.</li>
@@ -397,7 +397,7 @@ rather than receive iterations of review.</p>
   <li>Introduces complex new functionality, especially an API that needs to be supported</li>
   <li>Adds complexity that only helps a niche use case</li>
   <li>Adds user-space functionality that does not need to be maintained in Spark, but could be hosted 
-externally and indexed by <a href="http://spark-packages.org/">spark-packages.org</a></li>
+externally and indexed by <a href="http://spark-packages.org/">spark-packages.org</a> </li>
   <li>Changes a public API or semantics (rarely allowed)</li>
   <li>Adds large dependencies</li>
   <li>Changes versions of existing dependencies</li>
@@ -456,7 +456,7 @@ Example: <code>Fix typos in Foo scaladoc</code></li>
   and there is a workaround</li>
               <li>Minor: a niche use case is missing some support, but it does not affect usage or 
   is easily worked around</li>
-              <li>Trivial: a nice-to-have change but unlikely to be any problem in practice otherwise</li>
+              <li>Trivial: a nice-to-have change but unlikely to be any problem in practice otherwise </li>
             </ol>
           </li>
           <li><strong>Component</strong></li>
@@ -680,13 +680,11 @@ instead of Scala docs style.</p>
 
 <p>Always import packages using absolute paths (e.g. <code>scala.util.Random</code>) instead of relative ones 
 (e.g. <code>util.Random</code>). In addition, sort imports in the following order 
-(use alphabetical order within each group):</p>
-<ul>
-  <li><code>java.*</code> and <code>javax.*</code></li>
-  <li><code>scala.*</code></li>
-  <li>Third-party libraries (<code>org.*</code>, <code>com.*</code>, etc)</li>
-  <li>Project classes (<code>org.apache.spark.*</code>)</li>
-</ul>
+(use alphabetical order within each group):
+- <code>java.*</code> and <code>javax.*</code>
+- <code>scala.*</code>
+- Third-party libraries (<code>org.*</code>, <code>com.*</code>, etc)
+- Project classes (<code>org.apache.spark.*</code>)</p>
 
 <p>The <a href="https://plugins.jetbrains.com/plugin/7350">IntelliJ import organizer plugin</a> 
 can organize imports for you. Use this configuration for the plugin (configured under 

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/developer-tools.html
----------------------------------------------------------------------
diff --git a/site/developer-tools.html b/site/developer-tools.html
index 0de49c6..a8495af 100644
--- a/site/developer-tools.html
+++ b/site/developer-tools.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -354,13 +354,13 @@ compiler options&#8221; field.  It will work then although the option will come
 reimports.  If you try to build any of the projects using quasiquotes (eg., sql) then you will 
 need to make that jar a compiler plugin (just below &#8220;Additional compiler options&#8221;). 
 Otherwise you will see errors like:
-    <pre><code>/Users/irashid/github/spark/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+<code>
+/Users/irashid/github/spark/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
 Error:(147, 9) value q is not a member of StringContext
  Note: implicit class Evaluate2 is not applicable here because it comes after the application point and it lacks an explicit result type
       q"""
       ^ 
-</code></pre>
-  </li>
+</code></li>
 </ul>
 
 <h4>Eclipse</h4>
@@ -443,12 +443,12 @@ consider mirroring this file or including it on a custom AMI.</li>
   <li>Copy the expanded YourKit files to each node using copy-dir: <code>~/spark-ec2/copy-dir /root/yjp-12.0.5</code></li>
   <li>Configure the Spark JVMs to use the YourKit profiling agent by editing <code>~/spark/conf/spark-env.sh</code> 
 and adding the lines
-    <pre><code>SPARK_DAEMON_JAVA_OPTS+=" -agentpath:/root/yjp-12.0.5/bin/linux-x86-64/libyjpagent.so=sampling"
+<code>
+SPARK_DAEMON_JAVA_OPTS+=" -agentpath:/root/yjp-12.0.5/bin/linux-x86-64/libyjpagent.so=sampling"
 export SPARK_DAEMON_JAVA_OPTS
 SPARK_JAVA_OPTS+=" -agentpath:/root/yjp-12.0.5/bin/linux-x86-64/libyjpagent.so=sampling"
 export SPARK_JAVA_OPTS
-</code></pre>
-  </li>
+</code></li>
   <li>Copy the updated configuration to each node: <code>~/spark-ec2/copy-dir ~/spark/conf/spark-env.sh</code></li>
   <li>Restart your Spark cluster: <code>~/spark/bin/stop-all.sh</code> and <code>~/spark/bin/start-all.sh</code></li>
   <li>By default, the YourKit profiler agents use ports 10001-10010. To connect the YourKit desktop 
@@ -473,7 +473,7 @@ cluster with the same name, your security group settings will be re-used.</li>
 <h4>In Spark unit tests</h4>
 
 <p>When running Spark tests through SBT, add <code>javaOptions in Test += "-agentpath:/path/to/yjp"</code>
-to <code>SparkBuild.scala</code> to launch the tests with the YourKit profiler agent enabled.<br />
+to <code>SparkBuild.scala</code> to launch the tests with the YourKit profiler agent enabled.  
 The platform-specific paths to the profiler agents are listed in the 
 <a href="http://www.yourkit.com/docs/80/help/agent.jsp">YourKit documentation</a>.</p>
 

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/docs/latest
----------------------------------------------------------------------
diff --git a/site/docs/latest b/site/docs/latest
index f93ea0c..5308748 120000
--- a/site/docs/latest
+++ b/site/docs/latest
@@ -1 +1 @@
-2.0.2
\ No newline at end of file
+2.1.0/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/documentation.html
----------------------------------------------------------------------
diff --git a/site/documentation.html b/site/documentation.html
index 8d911da..f4a2c81 100644
--- a/site/documentation.html
+++ b/site/documentation.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -197,6 +197,7 @@
 <p>Setup instructions, programming guides, and other documentation are available for each stable version of Spark below:</p>
 
 <ul>
+  <li><a href="/docs/2.1.0/">Spark 2.1.0</a></li>
   <li><a href="/docs/2.0.2/">Spark 2.0.2</a></li>
   <li><a href="/docs/2.0.1/">Spark 2.0.1</a></li>
   <li><a href="/docs/2.0.0/">Spark 2.0.0</a></li>
@@ -265,13 +266,12 @@
 </ul>
 
 <h4><a name="meetup-videos"></a>Meetup Talk Videos</h4>
-<p>In addition to the videos listed below, you can also view <a href="http://www.meetup.com/spark-users/files/">all slides from Bay Area meetups here</a>.</p>
+<p>In addition to the videos listed below, you can also view <a href="http://www.meetup.com/spark-users/files/">all slides from Bay Area meetups here</a>.
 <style type="text/css">
   .video-meta-info {
     font-size: 0.95em;
   }
-</style>
-
+</style></p>
 <ul>
   <li><a href="http://www.youtube.com/watch?v=NUQ-8to2XAk&amp;list=PL-x35fyliRwiP3YteXbnhk0QGOtYLBT3a">Spark 1.0 and Beyond</a> (<a href="http://files.meetup.com/3138542/Spark%201.0%20Meetup.ppt">slides</a>) <span class="video-meta-info">by Patrick Wendell, at Cisco in San Jose, 2014-04-23</span></li>
 

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/downloads.html
----------------------------------------------------------------------
diff --git a/site/downloads.html b/site/downloads.html
index aa88bba..0d1dded 100644
--- a/site/downloads.html
+++ b/site/downloads.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -244,7 +244,7 @@ You can select and download it above.
 
 <pre><code>groupId: org.apache.spark
 artifactId: spark-core_2.11
-version: 2.0.2
+version: 2.1.0
 </code></pre>
 
 <h3 id="spark-source-code-management">Spark Source Code Management</h3>
@@ -253,8 +253,8 @@ version: 2.0.2
 <pre><code># Master development branch
 git clone git://github.com/apache/spark.git
 
-# 2.0 maintenance branch with stability fixes on top of Spark 2.0.2
-git clone git://github.com/apache/spark.git -b branch-2.0
+# 2.1 maintenance branch with stability fixes on top of Spark 2.1.0
+git clone git://github.com/apache/spark.git -b branch-2.1
 </code></pre>
 
 <p>Once you&#8217;ve downloaded Spark, you can find instructions for installing and building it on the <a href="/documentation.html">documentation page</a>.</p>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/examples.html
----------------------------------------------------------------------
diff --git a/site/examples.html b/site/examples.html
index 18ece51..eec81a2 100644
--- a/site/examples.html
+++ b/site/examples.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>
@@ -222,11 +222,11 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-python active">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">text_file</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="p">)</span>
-<span class="n">counts</span> <span class="o">=</span> <span class="n">text_file</span><span class="o">.</span><span class="n">flatMap</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s">&quot; &quot;</span><span class="p">))</span> \
+<figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">text_file</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;hdfs://...&quot;</span><span class="p">)</span>
+<span class="n">counts</span> <span class="o">=</span> <span class="n">text_file</span><span class="o">.</span><span class="n">flatMap</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">&quot; &quot;</span><span class="p">))</span> \
              <span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">word</span><span class="p">:</span> <span class="p">(</span><span class="n">word</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span> \
              <span class="o">.</span><span class="n">reduceByKey</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span>
-<span class="n">counts</span><span class="o">.</span><span class="n">saveAsTextFile</span><span class="p">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="p">)</span></code></pre></figure>
+<span class="n">counts</span><span class="o">.</span><span class="n">saveAsTextFile</span><span class="p">(</span><span class="s2">&quot;hdfs://...&quot;</span><span class="p">)</span></code></pre></figure>
 
 </div>
 </div>
@@ -234,7 +234,7 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-scala">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">textFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">)</span>
+<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">textFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">)</span>
 <span class="k">val</span> <span class="n">counts</span> <span class="k">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">flatMap</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="o">(</span><span class="s">&quot; &quot;</span><span class="o">))</span>
                  <span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">word</span> <span class="k">=&gt;</span> <span class="o">(</span><span class="n">word</span><span class="o">,</span> <span class="mi">1</span><span class="o">))</span>
                  <span class="o">.</span><span class="n">reduceByKey</span><span class="o">(</span><span class="k">_</span> <span class="o">+</span> <span class="k">_</span><span class="o">)</span>
@@ -246,7 +246,7 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-java">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">);</span>
+<figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">);</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">words</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="na">flatMap</span><span class="o">(</span><span class="k">new</span> <span class="n">FlatMapFunction</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">String</span><span class="o">&gt;()</span> <span class="o">{</span>
   <span class="kd">public</span> <span class="n">Iterable</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="nf">call</span><span class="o">(</span><span class="n">String</span> <span class="n">s</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span><span class="n">s</span><span class="o">.</span><span class="na">split</span><span class="o">(</span><span class="s">&quot; &quot;</span><span class="o">));</span> <span class="o">}</span>
 <span class="o">});</span>
@@ -275,13 +275,13 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-python active">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="k">def</span> <span class="nf">sample</span><span class="p">(</span><span class="n">p</span><span class="p">):</span>
+<figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="k">def</span> <span class="nf">sample</span><span class="p">(</span><span class="n">p</span><span class="p">):</span>
     <span class="n">x</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">random</span><span class="p">(),</span> <span class="n">random</span><span class="p">()</span>
     <span class="k">return</span> <span class="mi">1</span> <span class="k">if</span> <span class="n">x</span><span class="o">*</span><span class="n">x</span> <span class="o">+</span> <span class="n">y</span><span class="o">*</span><span class="n">y</span> <span class="o">&lt;</span> <span class="mi">1</span> <span class="k">else</span> <span class="mi">0</span>
 
 <span class="n">count</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="nb">xrange</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">NUM_SAMPLES</span><span class="p">))</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="n">sample</span><span class="p">)</span> \
              <span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span>
-<span class="k">print</span> <span class="s">&quot;Pi is roughly </span><span class="si">%f</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="mf">4.0</span> <span class="o">*</span> <span class="n">count</span> <span class="o">/</span> <span class="n">NUM_SAMPLES</span><span class="p">)</span></code></pre></figure>
+<span class="k">print</span> <span class="s2">&quot;Pi is roughly </span><span class="si">%f</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="mf">4.0</span> <span class="o">*</span> <span class="n">count</span> <span class="o">/</span> <span class="n">NUM_SAMPLES</span><span class="p">)</span></code></pre></figure>
 
 </div>
 </div>
@@ -289,7 +289,7 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-scala">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">count</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="o">(</span><span class="mi">1</span> <span class="n">to</span> <span class="nc">NUM_SAMPLES</span><span class="o">).</span><span class="n">map</span><span class="o">{</span><span class="n">i</span> <span class="k">=&gt;</span>
+<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">count</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="o">(</span><span class="mi">1</span> <span class="n">to</span> <span class="nc">NUM_SAMPLES</span><span class="o">).</span><span class="n">map</span><span class="o">{</span><span class="n">i</span> <span class="k">=&gt;</span>
   <span class="k">val</span> <span class="n">x</span> <span class="k">=</span> <span class="nc">Math</span><span class="o">.</span><span class="n">random</span><span class="o">()</span>
   <span class="k">val</span> <span class="n">y</span> <span class="k">=</span> <span class="nc">Math</span><span class="o">.</span><span class="n">random</span><span class="o">()</span>
   <span class="k">if</span> <span class="o">(</span><span class="n">x</span><span class="o">*</span><span class="n">x</span> <span class="o">+</span> <span class="n">y</span><span class="o">*</span><span class="n">y</span> <span class="o">&lt;</span> <span class="mi">1</span><span class="o">)</span> <span class="mi">1</span> <span class="k">else</span> <span class="mi">0</span>
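Rendered, the Scala Monte Carlo Pi estimation above reads as below; the closing reduce and print fall outside the hunk and are reconstructed from the published page:

    val count = sc.parallelize(1 to NUM_SAMPLES).map{i =>
      val x = Math.random()
      val y = Math.random()
      if (x*x + y*y < 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / NUM_SAMPLES)

The estimate works because the fraction of uniformly random points in the unit square that land inside the quarter circle tends to Pi/4.
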
@@ -302,7 +302,7 @@ In this page, we will show examples using RDD API as well as examples using high
 <div class="tab-pane tab-pane-java">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">List</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">l</span> <span class="o">=</span> <span class="k">new</span> <span class="n">ArrayList</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;(</span><span class="n">NUM_SAMPLES</span><span class="o">);</span>
+<figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">List</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">l</span> <span class="o">=</span> <span class="k">new</span> <span class="n">ArrayList</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;(</span><span class="n">NUM_SAMPLES</span><span class="o">);</span>
 <span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">NUM_SAMPLES</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
   <span class="n">l</span><span class="o">.</span><span class="na">add</span><span class="o">(</span><span class="n">i</span><span class="o">);</span>
 <span class="o">}</span>
@@ -342,17 +342,17 @@ Also, programs based on DataFrame API will be automatically optimized by Spark
 <div class="tab-pane tab-pane-python active">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="p">)</span>
+<figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;hdfs://...&quot;</span><span class="p">)</span>
 
-<span class="c"># Creates a DataFrame having a single column named &quot;line&quot;</span>
-<span class="n">df</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">r</span><span class="p">:</span> <span class="n">Row</span><span class="p">(</span><span class="n">r</span><span class="p">))</span><span class="o">.</span><span class="n">toDF</span><span class="p">([</span><span class="s">&quot;line&quot;</span><span class="p">])</span>
-<span class="n">errors</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s">&quot;</span><span class="si">%E</span><span class="s">RROR%&quot;</span><span class="p">))</span>
-<span class="c"># Counts all the errors</span>
+<span class="c1"># Creates a DataFrame having a single column named &quot;line&quot;</span>
+<span class="n">df</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">r</span><span class="p">:</span> <span class="n">Row</span><span class="p">(</span><span class="n">r</span><span class="p">))</span><span class="o">.</span><span class="n">toDF</span><span class="p">([</span><span class="s2">&quot;line&quot;</span><span class="p">])</span>
+<span class="n">errors</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s2">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s2">&quot;</span><span class="si">%E</span><span class="s2">RROR%&quot;</span><span class="p">))</span>
+<span class="c1"># Counts all the errors</span>
 <span class="n">errors</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
-<span class="c"># Counts errors mentioning MySQL</span>
-<span class="n">errors</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s">&quot;%MySQL%&quot;</span><span class="p">))</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
-<span class="c"># Fetches the MySQL errors as an array of strings</span>
-<span class="n">errors</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s">&quot;%MySQL%&quot;</span><span class="p">))</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span></code></pre></figure>
+<span class="c1"># Counts errors mentioning MySQL</span>
+<span class="n">errors</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s2">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s2">&quot;%MySQL%&quot;</span><span class="p">))</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+<span class="c1"># Fetches the MySQL errors as an array of strings</span>
+<span class="n">errors</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="n">col</span><span class="p">(</span><span class="s2">&quot;line&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">like</span><span class="p">(</span><span class="s2">&quot;%MySQL%&quot;</span><span class="p">))</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span></code></pre></figure>
 
 </div>
 </div>
@@ -360,7 +360,7 @@ Also, programs based on DataFrame API will be automatically optimized by Spark
 <div class="tab-pane tab-pane-scala">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">textFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">)</span>
+<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">textFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">)</span>
 
 <span class="c1">// Creates a DataFrame having a single column named &quot;line&quot;</span>
 <span class="k">val</span> <span class="n">df</span> <span class="k">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">toDF</span><span class="o">(</span><span class="s">&quot;line&quot;</span><span class="o">)</span>
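Rendered, the Scala text-search example reads as below; everything after the toDF line sits outside the hunk and is reconstructed from the parallel Python version shown in full above:

    val textFile = sc.textFile("hdfs://...")

    // Creates a DataFrame having a single column named "line"
    val df = textFile.toDF("line")
    val errors = df.filter(col("line").like("%ERROR%"))
    // Counts all the errors
    errors.count()
    // Counts errors mentioning MySQL
    errors.filter(col("line").like("%MySQL%")).count()
    // Fetches the MySQL errors as an array of strings
    errors.filter(col("line").like("%MySQL%")).collect()
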
@@ -378,7 +378,7 @@ Also, programs based on DataFrame API will be automatically optimized by Spark
 <div class="tab-pane tab-pane-java">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="c1">// Creates a DataFrame having a single column named &quot;line&quot;</span>
+<figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="c1">// Creates a DataFrame having a single column named &quot;line&quot;</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;hdfs://...&quot;</span><span class="o">);</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Row</span><span class="o">&gt;</span> <span class="n">rowRDD</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="na">map</span><span class="o">(</span>
   <span class="k">new</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Row</span><span class="o">&gt;()</span> <span class="o">{</span>
@@ -421,26 +421,26 @@ A simple MySQL table "people" is used in the example and this table has two colu
 <div class="tab-pane tab-pane-python active">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="c"># Creates a DataFrame based on a table named &quot;people&quot;</span>
-<span class="c"># stored in a MySQL database.</span>
+<figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="c1"># Creates a DataFrame based on a table named &quot;people&quot;</span>
+<span class="c1"># stored in a MySQL database.</span>
 <span class="n">url</span> <span class="o">=</span> \
-  <span class="s">&quot;jdbc:mysql://yourIP:yourPort/test?user=yourUsername;password=yourPassword&quot;</span>
+  <span class="s2">&quot;jdbc:mysql://yourIP:yourPort/test?user=yourUsername;password=yourPassword&quot;</span>
 <span class="n">df</span> <span class="o">=</span> <span class="n">sqlContext</span> \
   <span class="o">.</span><span class="n">read</span> \
-  <span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="s">&quot;jdbc&quot;</span><span class="p">)</span> \
-  <span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="s">&quot;url&quot;</span><span class="p">,</span> <span class="n">url</span><span class="p">)</span> \
-  <span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="s">&quot;dbtable&quot;</span><span class="p">,</span> <span class="s">&quot;people&quot;</span><span class="p">)</span> \
+  <span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="s2">&quot;jdbc&quot;</span><span class="p">)</span> \
+  <span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="s2">&quot;url&quot;</span><span class="p">,</span> <span class="n">url</span><span class="p">)</span> \
+  <span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="s2">&quot;dbtable&quot;</span><span class="p">,</span> <span class="s2">&quot;people&quot;</span><span class="p">)</span> \
   <span class="o">.</span><span class="n">load</span><span class="p">()</span>
 
-<span class="c"># Looks the schema of this DataFrame.</span>
+<span class="c1"># Looks the schema of this DataFrame.</span>
 <span class="n">df</span><span class="o">.</span><span class="n">printSchema</span><span class="p">()</span>
 
-<span class="c"># Counts people by age</span>
-<span class="n">countsByAge</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">groupBy</span><span class="p">(</span><span class="s">&quot;age&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+<span class="c1"># Counts people by age</span>
+<span class="n">countsByAge</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">groupBy</span><span class="p">(</span><span class="s2">&quot;age&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
 <span class="n">countsByAge</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
 
-<span class="c"># Saves countsByAge to S3 in the JSON format.</span>
-<span class="n">countsByAge</span><span class="o">.</span><span class="n">write</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="s">&quot;json&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="s">&quot;s3a://...&quot;</span><span class="p">)</span></code></pre></figure>
+<span class="c1"># Saves countsByAge to S3 in the JSON format.</span>
+<span class="n">countsByAge</span><span class="o">.</span><span class="n">write</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="s2">&quot;json&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="s2">&quot;s3a://...&quot;</span><span class="p">)</span></code></pre></figure>
 
 </div>
 </div>
@@ -448,7 +448,7 @@ A simple MySQL table "people" is used in the example and this table has two colu
 <div class="tab-pane tab-pane-scala">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="c1">// Creates a DataFrame based on a table named &quot;people&quot;</span>
+<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="c1">// Creates a DataFrame based on a table named &quot;people&quot;</span>
 <span class="c1">// stored in a MySQL database.</span>
 <span class="k">val</span> <span class="n">url</span> <span class="k">=</span>
   <span class="s">&quot;jdbc:mysql://yourIP:yourPort/test?user=yourUsername;password=yourPassword&quot;</span>
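Rendered, the Scala JDBC example reads as below; the lines after the url definition sit outside the hunk and mirror the full Python version above. (The page's comment "Looks the schema of this DataFrame" is awkward English; the call simply prints the schema, so the sketch below says so.)

    // Creates a DataFrame based on a table named "people"
    // stored in a MySQL database.
    val url =
      "jdbc:mysql://yourIP:yourPort/test?user=yourUsername;password=yourPassword"
    val df = sqlContext
      .read
      .format("jdbc")
      .option("url", url)
      .option("dbtable", "people")
      .load()

    // Prints the schema of this DataFrame.
    df.printSchema()

    // Counts people by age
    val countsByAge = df.groupBy("age").count()
    countsByAge.show()

    // Saves countsByAge to S3 in the JSON format.
    countsByAge.write.format("json").save("s3a://...")
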
@@ -475,7 +475,7 @@ A simple MySQL table "people" is used in the example and this table has two colu
 <div class="tab-pane tab-pane-java">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="c1">// Creates a DataFrame based on a table named &quot;people&quot;</span>
+<figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="c1">// Creates a DataFrame based on a table named &quot;people&quot;</span>
 <span class="c1">// stored in a MySQL database.</span>
 <span class="n">String</span> <span class="n">url</span> <span class="o">=</span>
   <span class="s">&quot;jdbc:mysql://yourIP:yourPort/test?user=yourUsername;password=yourPassword&quot;</span><span class="o">;</span>
@@ -525,18 +525,18 @@ We learn to predict the labels from feature vectors using the Logistic Regressio
 <div class="tab-pane tab-pane-python active">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="c"># Every record of this DataFrame contains the label and</span>
-<span class="c"># features represented by a vector.</span>
-<span class="n">df</span> <span class="o">=</span> <span class="n">sqlContext</span><span class="o">.</span><span class="n">createDataFrame</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="p">[</span><span class="s">&quot;label&quot;</span><span class="p">,</span> <span class="s">&quot;features&quot;</span><span class="p">])</span>
+<figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="c1"># Every record of this DataFrame contains the label and</span>
+<span class="c1"># features represented by a vector.</span>
+<span class="n">df</span> <span class="o">=</span> <span class="n">sqlContext</span><span class="o">.</span><span class="n">createDataFrame</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="p">[</span><span class="s2">&quot;label&quot;</span><span class="p">,</span> <span class="s2">&quot;features&quot;</span><span class="p">])</span>
 
-<span class="c"># Set parameters for the algorithm.</span>
-<span class="c"># Here, we limit the number of iterations to 10.</span>
+<span class="c1"># Set parameters for the algorithm.</span>
+<span class="c1"># Here, we limit the number of iterations to 10.</span>
 <span class="n">lr</span> <span class="o">=</span> <span class="n">LogisticRegression</span><span class="p">(</span><span class="n">maxIter</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
 
-<span class="c"># Fit the model to the data.</span>
+<span class="c1"># Fit the model to the data.</span>
 <span class="n">model</span> <span class="o">=</span> <span class="n">lr</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">df</span><span class="p">)</span>
 
-<span class="c"># Given a dataset, predict each point&#39;s label, and show the results.</span>
+<span class="c1"># Given a dataset, predict each point&#39;s label, and show the results.</span>
 <span class="n">model</span><span class="o">.</span><span class="n">transform</span><span class="p">(</span><span class="n">df</span><span class="p">)</span><span class="o">.</span><span class="n">show</span><span class="p">()</span></code></pre></figure>
 
 </div>
@@ -545,7 +545,7 @@ We learn to predict the labels from feature vectors using the Logistic Regressio
 <div class="tab-pane tab-pane-scala">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="c1">// Every record of this DataFrame contains the label and</span>
+<figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="c1">// Every record of this DataFrame contains the label and</span>
 <span class="c1">// features represented by a vector.</span>
 <span class="k">val</span> <span class="n">df</span> <span class="k">=</span> <span class="n">sqlContext</span><span class="o">.</span><span class="n">createDataFrame</span><span class="o">(</span><span class="n">data</span><span class="o">).</span><span class="n">toDF</span><span class="o">(</span><span class="s">&quot;label&quot;</span><span class="o">,</span> <span class="s">&quot;features&quot;</span><span class="o">)</span>
 
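Rendered, the Scala logistic regression example reads as below; everything after the createDataFrame line sits outside the hunk and mirrors the Python version above and the Java version in the next hunk:

    // Every record of this DataFrame contains the label and
    // features represented by a vector.
    val df = sqlContext.createDataFrame(data).toDF("label", "features")

    // Set parameters for the algorithm.
    // Here, we limit the number of iterations to 10.
    val lr = new LogisticRegression().setMaxIter(10)

    // Fit the model to the data.
    val model = lr.fit(df)

    // Given a dataset, predict each point's label, and show the results.
    model.transform(df).show()
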
@@ -568,17 +568,17 @@ We learn to predict the labels from feature vectors using the Logistic Regressio
 <div class="tab-pane tab-pane-java">
 <div class="code code-tab">
 
-<figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="c1">// Every record of this DataFrame contains the label and</span>
+<figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="c1">// Every record of this DataFrame contains the label and</span>
 <span class="c1">// features represented by a vector.</span>
-<span class="n">StructType</span> <span class="n">schema</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">StructType</span><span class="o">(</span><span class="k">new</span> <span class="n">StructField</span><span class="o">[]{</span>
-  <span class="k">new</span> <span class="nf">StructField</span><span class="o">(</span><span class="s">&quot;label&quot;</span><span class="o">,</span> <span class="n">DataTypes</span><span class="o">.</span><span class="na">DoubleType</span><span class="o">,</span> <span class="kc">false</span><span class="o">,</span> <span class="n">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
-  <span class="k">new</span> <span class="nf">StructField</span><span class="o">(</span><span class="s">&quot;features&quot;</span><span class="o">,</span> <span class="k">new</span> <span class="nf">VectorUDT</span><span class="o">(),</span> <span class="kc">false</span><span class="o">,</span> <span class="n">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
+<span class="n">StructType</span> <span class="n">schema</span> <span class="o">=</span> <span class="k">new</span> <span class="n">StructType</span><span class="o">(</span><span class="k">new</span> <span class="n">StructField</span><span class="o">[]{</span>
+  <span class="k">new</span> <span class="n">StructField</span><span class="o">(</span><span class="s">&quot;label&quot;</span><span class="o">,</span> <span class="n">DataTypes</span><span class="o">.</span><span class="na">DoubleType</span><span class="o">,</span> <span class="kc">false</span><span class="o">,</span> <span class="n">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
+  <span class="k">new</span> <span class="n">StructField</span><span class="o">(</span><span class="s">&quot;features&quot;</span><span class="o">,</span> <span class="k">new</span> <span class="n">VectorUDT</span><span class="o">(),</span> <span class="kc">false</span><span class="o">,</span> <span class="n">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
 <span class="o">});</span>
 <span class="n">DataFrame</span> <span class="n">df</span> <span class="o">=</span> <span class="n">jsql</span><span class="o">.</span><span class="na">createDataFrame</span><span class="o">(</span><span class="n">data</span><span class="o">,</span> <span class="n">schema</span><span class="o">);</span>
 
 <span class="c1">// Set parameters for the algorithm.</span>
 <span class="c1">// Here, we limit the number of iterations to 10.</span>
-<span class="n">LogisticRegression</span> <span class="n">lr</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">LogisticRegression</span><span class="o">().</span><span class="na">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">);</span>
+<span class="n">LogisticRegression</span> <span class="n">lr</span> <span class="o">=</span> <span class="k">new</span> <span class="n">LogisticRegression</span><span class="o">().</span><span class="na">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">);</span>
 
 <span class="c1">// Fit the model to the data.</span>
 <span class="n">LogisticRegressionModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">lr</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">df</span><span class="o">);</span>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/faq.html
----------------------------------------------------------------------
diff --git a/site/faq.html b/site/faq.html
index cc8e0b9..457dfa6 100644
--- a/site/faq.html
+++ b/site/faq.html
@@ -106,7 +106,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -159,6 +159,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -168,9 +171,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/graphx/index.html
----------------------------------------------------------------------
diff --git a/site/graphx/index.html b/site/graphx/index.html
index d3251bf..700fada 100644
--- a/site/graphx/index.html
+++ b/site/graphx/index.html
@@ -109,7 +109,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -162,6 +162,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -171,9 +174,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/index.html
----------------------------------------------------------------------
diff --git a/site/index.html b/site/index.html
index 0ddfb81..fd66406 100644
--- a/site/index.html
+++ b/site/index.html
@@ -108,7 +108,7 @@
           Documentation <b class="caret"></b>
         </a>
         <ul class="dropdown-menu">
-          <li><a href="/docs/latest/">Latest Release (Spark 2.0.2)</a></li>
+          <li><a href="/docs/latest/">Latest Release (Spark 2.1.0)</a></li>
           <li><a href="/documentation.html">Older Versions and Other Resources</a></li>
           <li><a href="/faq.html">Frequently Asked Questions</a></li>
         </ul>
@@ -161,6 +161,9 @@
       <h5>Latest News</h5>
       <ul class="list-unstyled">
         
+          <li><a href="/news/spark-2-1-0-released.html">Spark 2.1.0 released</a>
+          <span class="small">(Dec 28, 2016)</span></li>
+        
           <li><a href="/news/spark-wins-cloudsort-100tb-benchmark.html">Spark wins CloudSort Benchmark as the most efficient engine</a>
           <span class="small">(Nov 15, 2016)</span></li>
         
@@ -170,9 +173,6 @@
           <li><a href="/news/spark-1-6-3-released.html">Spark 1.6.3 released</a>
           <span class="small">(Nov 07, 2016)</span></li>
         
-          <li><a href="/news/spark-2-0-1-released.html">Spark 2.0.1 released</a>
-          <span class="small">(Oct 03, 2016)</span></li>
-        
       </ul>
       <p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
     </div>

http://git-wip-us.apache.org/repos/asf/spark-website/blob/e10180e6/site/js/downloads.js
----------------------------------------------------------------------
diff --git a/site/js/downloads.js b/site/js/downloads.js
index a7a5482..36a04c7 100644
--- a/site/js/downloads.js
+++ b/site/js/downloads.js
@@ -36,6 +36,7 @@ var packagesV7 = [hadoop2p7, hadoop2p6, hadoop2p4, hadoop2p3, hadoopFree, source
 
 // addRelease("2.0.0-preview", new Date("05/24/2016"), sources.concat(packagesV7), true, false);
 
+addRelease("2.1.0", new Date("12/28/2016"), packagesV7, true);
 addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true);
 addRelease("2.0.1", new Date("10/03/2016"), packagesV7, true);
 addRelease("2.0.0", new Date("07/26/2016"), packagesV7, true);
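The one added line above registers 2.1.0 (released 12/28/2016) with the same Hadoop package list (packagesV7) as the 2.0.x releases; this list is what populates the version picker on the downloads page. The trailing true presumably marks the release as stable, matching the 2.0.x calls below it.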

