Posted to commits@spark.apache.org by yh...@apache.org on 2016/12/28 22:35:20 UTC

[09/25] spark-website git commit: Update 2.1.0 docs to include https://github.com/apache/spark/pull/16294

http://git-wip-us.apache.org/repos/asf/spark-website/blob/d2bcf185/site/docs/2.1.0/programming-guide.html
----------------------------------------------------------------------
diff --git a/site/docs/2.1.0/programming-guide.html b/site/docs/2.1.0/programming-guide.html
index 12458af..0e06e86 100644
--- a/site/docs/2.1.0/programming-guide.html
+++ b/site/docs/2.1.0/programming-guide.html
@@ -129,50 +129,50 @@
                     
 
                     <ul id="markdown-toc">
-  <li><a href="#overview" id="markdown-toc-overview">Overview</a></li>
-  <li><a href="#linking-with-spark" id="markdown-toc-linking-with-spark">Linking with Spark</a></li>
-  <li><a href="#initializing-spark" id="markdown-toc-initializing-spark">Initializing Spark</a>    <ul>
-      <li><a href="#using-the-shell" id="markdown-toc-using-the-shell">Using the Shell</a></li>
+  <li><a href="#overview">Overview</a></li>
+  <li><a href="#linking-with-spark">Linking with Spark</a></li>
+  <li><a href="#initializing-spark">Initializing Spark</a>    <ul>
+      <li><a href="#using-the-shell">Using the Shell</a></li>
     </ul>
   </li>
-  <li><a href="#resilient-distributed-datasets-rdds" id="markdown-toc-resilient-distributed-datasets-rdds">Resilient Distributed Datasets (RDDs)</a>    <ul>
-      <li><a href="#parallelized-collections" id="markdown-toc-parallelized-collections">Parallelized Collections</a></li>
-      <li><a href="#external-datasets" id="markdown-toc-external-datasets">External Datasets</a></li>
-      <li><a href="#rdd-operations" id="markdown-toc-rdd-operations">RDD Operations</a>        <ul>
-          <li><a href="#basics" id="markdown-toc-basics">Basics</a></li>
-          <li><a href="#passing-functions-to-spark" id="markdown-toc-passing-functions-to-spark">Passing Functions to Spark</a></li>
-          <li><a href="#understanding-closures-a-nameclosureslinka" id="markdown-toc-understanding-closures-a-nameclosureslinka">Understanding closures <a name="ClosuresLink"></a></a>            <ul>
-              <li><a href="#example" id="markdown-toc-example">Example</a></li>
-              <li><a href="#local-vs-cluster-modes" id="markdown-toc-local-vs-cluster-modes">Local vs. cluster modes</a></li>
-              <li><a href="#printing-elements-of-an-rdd" id="markdown-toc-printing-elements-of-an-rdd">Printing elements of an RDD</a></li>
+  <li><a href="#resilient-distributed-datasets-rdds">Resilient Distributed Datasets (RDDs)</a>    <ul>
+      <li><a href="#parallelized-collections">Parallelized Collections</a></li>
+      <li><a href="#external-datasets">External Datasets</a></li>
+      <li><a href="#rdd-operations">RDD Operations</a>        <ul>
+          <li><a href="#basics">Basics</a></li>
+          <li><a href="#passing-functions-to-spark">Passing Functions to Spark</a></li>
+          <li><a href="#understanding-closures-a-nameclosureslinka">Understanding closures <a name="ClosuresLink"></a></a>            <ul>
+              <li><a href="#example">Example</a></li>
+              <li><a href="#local-vs-cluster-modes">Local vs. cluster modes</a></li>
+              <li><a href="#printing-elements-of-an-rdd">Printing elements of an RDD</a></li>
             </ul>
           </li>
-          <li><a href="#working-with-key-value-pairs" id="markdown-toc-working-with-key-value-pairs">Working with Key-Value Pairs</a></li>
-          <li><a href="#transformations" id="markdown-toc-transformations">Transformations</a></li>
-          <li><a href="#actions" id="markdown-toc-actions">Actions</a></li>
-          <li><a href="#shuffle-operations" id="markdown-toc-shuffle-operations">Shuffle operations</a>            <ul>
-              <li><a href="#background" id="markdown-toc-background">Background</a></li>
-              <li><a href="#performance-impact" id="markdown-toc-performance-impact">Performance Impact</a></li>
+          <li><a href="#working-with-key-value-pairs">Working with Key-Value Pairs</a></li>
+          <li><a href="#transformations">Transformations</a></li>
+          <li><a href="#actions">Actions</a></li>
+          <li><a href="#shuffle-operations">Shuffle operations</a>            <ul>
+              <li><a href="#background">Background</a></li>
+              <li><a href="#performance-impact">Performance Impact</a></li>
             </ul>
           </li>
         </ul>
       </li>
-      <li><a href="#rdd-persistence" id="markdown-toc-rdd-persistence">RDD Persistence</a>        <ul>
-          <li><a href="#which-storage-level-to-choose" id="markdown-toc-which-storage-level-to-choose">Which Storage Level to Choose?</a></li>
-          <li><a href="#removing-data" id="markdown-toc-removing-data">Removing Data</a></li>
+      <li><a href="#rdd-persistence">RDD Persistence</a>        <ul>
+          <li><a href="#which-storage-level-to-choose">Which Storage Level to Choose?</a></li>
+          <li><a href="#removing-data">Removing Data</a></li>
         </ul>
       </li>
     </ul>
   </li>
-  <li><a href="#shared-variables" id="markdown-toc-shared-variables">Shared Variables</a>    <ul>
-      <li><a href="#broadcast-variables" id="markdown-toc-broadcast-variables">Broadcast Variables</a></li>
-      <li><a href="#accumulators" id="markdown-toc-accumulators">Accumulators</a></li>
+  <li><a href="#shared-variables">Shared Variables</a>    <ul>
+      <li><a href="#broadcast-variables">Broadcast Variables</a></li>
+      <li><a href="#accumulators">Accumulators</a></li>
     </ul>
   </li>
-  <li><a href="#deploying-to-a-cluster" id="markdown-toc-deploying-to-a-cluster">Deploying to a Cluster</a></li>
-  <li><a href="#launching-spark-jobs-from-java--scala" id="markdown-toc-launching-spark-jobs-from-java--scala">Launching Spark jobs from Java / Scala</a></li>
-  <li><a href="#unit-testing" id="markdown-toc-unit-testing">Unit Testing</a></li>
-  <li><a href="#where-to-go-from-here" id="markdown-toc-where-to-go-from-here">Where to Go from Here</a></li>
+  <li><a href="#deploying-to-a-cluster">Deploying to a Cluster</a></li>
+  <li><a href="#launching-spark-jobs-from-java--scala">Launching Spark jobs from Java / Scala</a></li>
+  <li><a href="#unit-testing">Unit Testing</a></li>
+  <li><a href="#where-to-go-from-here">Where to Go from Here</a></li>
 </ul>
 
 <h1 id="overview">Overview</h1>
@@ -212,8 +212,8 @@ version = &lt;your-hdfs-version&gt;
 
     <p>Finally, you need to import some Spark classes into your program. Add the following lines:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span>
-<span class="k">import</span> <span class="nn">org.apache.spark.SparkConf</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span>
+<span class="k">import</span> <span class="nn">org.apache.spark.SparkConf</span></code></pre></figure>
 
     <p>(Before Spark 1.3.0, you need to explicitly <code>import org.apache.spark.SparkContext._</code> to enable essential implicit conversions.)</p>
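    Taken together, and hedging on the Spark version in use, the Scala import header sketched below covers both cases; the commented line is only needed on releases before 1.3.0, as the note above says.

        import org.apache.spark.SparkContext
        import org.apache.spark.SparkConf
        // Only needed before Spark 1.3.0, to enable the essential implicit conversions:
        // import org.apache.spark.SparkContext._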
 
@@ -245,9 +245,9 @@ version = &lt;your-hdfs-version&gt;
 
     <p>Finally, you need to import some Spark classes into your program. Add the following lines:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span>
 <span class="k">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span>
-<span class="k">import</span> <span class="nn">org.apache.spark.SparkConf</span></code></pre></div>
+<span class="k">import</span> <span class="nn">org.apache.spark.SparkConf</span></code></pre></figure>
 
   </div>
 
@@ -269,13 +269,13 @@ for common HDFS versions.</p>
 
     <p>Finally, you need to import some Spark classes into your program. Add the following line:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="kn">from</span> <span class="nn">pyspark</span> <span class="kn">import</span> <span class="n">SparkContext</span><span class="p">,</span> <span class="n">SparkConf</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="kn">from</span> <span class="nn">pyspark</span> <span class="kn">import</span> <span class="n">SparkContext</span><span class="p">,</span> <span class="n">SparkConf</span></code></pre></figure>
 
     <p>PySpark requires the same minor version of Python in both driver and workers. It uses the default Python version in PATH;
 you can specify which version of Python to use with <code>PYSPARK_PYTHON</code>, for example:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ PYSPARK_PYTHON</span><span class="o">=</span>python3.4 bin/pyspark
-<span class="nv">$ PYSPARK_PYTHON</span><span class="o">=</span>/opt/pypy-2.5/bin/pypy bin/spark-submit examples/src/main/python/pi.py</code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ <span class="nv">PYSPARK_PYTHON</span><span class="o">=</span>python3.4 bin/pyspark
+$ <span class="nv">PYSPARK_PYTHON</span><span class="o">=</span>/opt/pypy-2.5/bin/pypy bin/spark-submit examples/src/main/python/pi.py</code></pre></figure>
 
   </div>
 
@@ -293,8 +293,8 @@ that contains information about your application.</p>
 
     <p>Only one SparkContext may be active per JVM.  You must <code>stop()</code> the active SparkContext before creating a new one.</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">conf</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkConf</span><span class="o">().</span><span class="n">setAppName</span><span class="o">(</span><span class="n">appName</span><span class="o">).</span><span class="n">setMaster</span><span class="o">(</span><span class="n">master</span><span class="o">)</span>
-<span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">conf</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkConf</span><span class="o">().</span><span class="n">setAppName</span><span class="o">(</span><span class="n">appName</span><span class="o">).</span><span class="n">setMaster</span><span class="o">(</span><span class="n">master</span><span class="o">)</span>
+<span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span></code></pre></figure>
 
   </div>
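    To illustrate the one-active-context rule quoted above, a minimal Scala sketch (the app name and master URL are placeholder values, not part of this guide's examples):

        import org.apache.spark.{SparkConf, SparkContext}

        val conf = new SparkConf().setAppName("demo-app").setMaster("local[2]")
        val sc = new SparkContext(conf)
        // ... use sc ...
        sc.stop()                          // release the active context first
        val sc2 = new SparkContext(conf)   // only now may a new one be created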
 
@@ -304,8 +304,8 @@ that contains information about your application.</p>
 how to access a cluster. To create a <code>SparkContext</code> you first need to build a <a href="api/java/index.html?org/apache/spark/SparkConf.html">SparkConf</a> object
 that contains information about your application.</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">SparkConf</span> <span class="n">conf</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">SparkConf</span><span class="o">().</span><span class="na">setAppName</span><span class="o">(</span><span class="n">appName</span><span class="o">).</span><span class="na">setMaster</span><span class="o">(</span><span class="n">master</span><span class="o">);</span>
-<span class="n">JavaSparkContext</span> <span class="n">sc</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">JavaSparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">);</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">SparkConf</span> <span class="n">conf</span> <span class="o">=</span> <span class="k">new</span> <span class="n">SparkConf</span><span class="o">().</span><span class="na">setAppName</span><span class="o">(</span><span class="n">appName</span><span class="o">).</span><span class="na">setMaster</span><span class="o">(</span><span class="n">master</span><span class="o">);</span>
+<span class="n">JavaSparkContext</span> <span class="n">sc</span> <span class="o">=</span> <span class="k">new</span> <span class="n">JavaSparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">);</span></code></pre></figure>
 
   </div>
 
@@ -315,8 +315,8 @@ that contains information about your application.</p>
 how to access a cluster. To create a <code>SparkContext</code> you first need to build a <a href="api/python/pyspark.html#pyspark.SparkConf">SparkConf</a> object
 that contains information about your application.</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">conf</span> <span class="o">=</span> <span class="n">SparkConf</span><span class="p">()</span><span class="o">.</span><span class="n">setAppName</span><span class="p">(</span><span class="n">appName</span><span class="p">)</span><span class="o">.</span><span class="n">setMaster</span><span class="p">(</span><span class="n">master</span><span class="p">)</span>
-<span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="n">conf</span><span class="o">=</span><span class="n">conf</span><span class="p">)</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">conf</span> <span class="o">=</span> <span class="n">SparkConf</span><span class="p">()</span><span class="o">.</span><span class="n">setAppName</span><span class="p">(</span><span class="n">appName</span><span class="p">)</span><span class="o">.</span><span class="n">setMaster</span><span class="p">(</span><span class="n">master</span><span class="p">)</span>
+<span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="n">conf</span><span class="o">=</span><span class="n">conf</span><span class="p">)</span></code></pre></figure>
 
   </div>
 
@@ -345,15 +345,15 @@ to the <code>--packages</code> argument. Any additional repositories where depen
 can be passed to the <code>--repositories</code> argument. For example, to run <code>bin/spark-shell</code> on exactly
 four cores, use:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>./bin/spark-shell --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ ./bin/spark-shell --master local<span class="o">[</span><span class="m">4</span><span class="o">]</span></code></pre></figure>
 
     <p>Or, to also add <code>code.jar</code> to its classpath, use:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>./bin/spark-shell --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> --jars code.jar</code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ ./bin/spark-shell --master local<span class="o">[</span><span class="m">4</span><span class="o">]</span> --jars code.jar</code></pre></figure>
 
     <p>To include a dependency using maven coordinates:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>./bin/spark-shell --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> --packages <span class="s2">&quot;org.example:example:0.1&quot;</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ ./bin/spark-shell --master local<span class="o">[</span><span class="m">4</span><span class="o">]</span> --packages <span class="s2">&quot;org.example:example:0.1&quot;</span></code></pre></figure>
 
     <p>For a complete list of options, run <code>spark-shell --help</code>. Behind the scenes,
 <code>spark-shell</code> invokes the more general <a href="submitting-applications.html"><code>spark-submit</code> script</a>.</p>
@@ -372,11 +372,11 @@ can be passed to the <code>--repositories</code> argument. Any Python dependenci
 the requirements.txt of that package) must be manually installed using <code>pip</code> when necessary.
 For example, to run <code>bin/pyspark</code> on exactly four cores, use:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>./bin/pyspark --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ ./bin/pyspark --master local<span class="o">[</span><span class="m">4</span><span class="o">]</span></code></pre></figure>
 
     <p>Or, to also add <code>code.py</code> to the search path (in order to later be able to <code>import code</code>), use:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>./bin/pyspark --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> --py-files code.py</code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ ./bin/pyspark --master local<span class="o">[</span><span class="m">4</span><span class="o">]</span> --py-files code.py</code></pre></figure>
 
     <p>For a complete list of options, run <code>pyspark --help</code>. Behind the scenes,
 <code>pyspark</code> invokes the more general <a href="submitting-applications.html"><code>spark-submit</code> script</a>.</p>
@@ -385,13 +385,13 @@ For example, to run <code>bin/pyspark</code> on exactly four cores, use:</p>
 enhanced Python interpreter. PySpark works with IPython 1.0.0 and later. To
 use IPython, set the <code>PYSPARK_DRIVER_PYTHON</code> variable to <code>ipython</code> when running <code>bin/pyspark</code>:</p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ PYSPARK_DRIVER_PYTHON</span><span class="o">=</span>ipython ./bin/pyspark</code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ <span class="nv">PYSPARK_DRIVER_PYTHON</span><span class="o">=</span>ipython ./bin/pyspark</code></pre></figure>
 
-    <p>To use the Jupyter notebook (previously known as the IPython notebook),</p>
+    <p>To use the Jupyter notebook (previously known as the IPython notebook), </p>
 
-    <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ PYSPARK_DRIVER_PYTHON</span><span class="o">=</span>jupyter ./bin/pyspark</code></pre></div>
+    <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span></span>$ <span class="nv">PYSPARK_DRIVER_PYTHON</span><span class="o">=</span>jupyter ./bin/pyspark</code></pre></figure>
 
-    <p>You can customize the <code>ipython</code> or <code>jupyter</code> commands by setting <code>PYSPARK_DRIVER_PYTHON_OPTS</code>.</p>
+    <p>You can customize the <code>ipython</code> or <code>jupyter</code> commands by setting <code>PYSPARK_DRIVER_PYTHON_OPTS</code>. </p>
 
     <p>After the Jupyter Notebook server is launched, you can create a new &#8220;Python 2&#8221; notebook from
 the &#8220;Files&#8221; tab. Inside the notebook, you can input the command <code>%pylab inline</code> as part of
@@ -415,8 +415,8 @@ shared filesystem, HDFS, HBase, or any data source offering a Hadoop InputFormat
 
     <p>Parallelized collections are created by calling <code>SparkContext</code>&#8217;s <code>parallelize</code> method on an existing collection in your driver program (a Scala <code>Seq</code>). The elements of the collection are copied to form a distributed dataset that can be operated on in parallel. For example, here is how to create a parallelized collection holding the numbers 1 to 5:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">data</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">,</span> <span class="mi">4</span><span class="o">,</span> <span class="mi">5</span><span class="o">)</span>
-<span class="k">val</span> <span class="n">distData</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">)</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">data</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">,</span> <span class="mi">4</span><span class="o">,</span> <span class="mi">5</span><span class="o">)</span>
+<span class="k">val</span> <span class="n">distData</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">)</span></code></pre></figure>
 
     <p>Once created, the distributed dataset (<code>distData</code>) can be operated on in parallel. For example, we might call <code>distData.reduce((a, b) =&gt; a + b)</code> to add up the elements of the array. We describe operations on distributed datasets later on.</p>
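    As a small, hedged extension of the snippet above: <code>parallelize</code> also accepts an optional second argument that sets the number of partitions, and the <code>reduce</code> mentioned in the text looks like this written out.

        val data = Array(1, 2, 3, 4, 5)
        val distData = sc.parallelize(data, 10)     // 10 partitions instead of the default
        val sum = distData.reduce((a, b) => a + b)  // sum == 15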
 
@@ -426,8 +426,8 @@ shared filesystem, HDFS, HBase, or any data source offering a Hadoop InputFormat
 
     <p>Parallelized collections are created by calling <code>JavaSparkContext</code>&#8217;s <code>parallelize</code> method on an existing <code>Collection</code> in your driver program. The elements of the collection are copied to form a distributed dataset that can be operated on in parallel. For example, here is how to create a parallelized collection holding the numbers 1 to 5:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">List</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">,</span> <span class="mi">4</span><span class="o">,</span> <span class="mi">5</span><span class="o">);</span>
-<span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">distData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">);</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">List</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">,</span> <span class="mi">4</span><span class="o">,</span> <span class="mi">5</span><span class="o">);</span>
+<span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">distData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">);</span></code></pre></figure>
 
     <p>Once created, the distributed dataset (<code>distData</code>) can be operated on in parallel. For example, we might call <code>distData.reduce((a, b) -&gt; a + b)</code> to add up the elements of the list.
 We describe operations on distributed datasets later on.</p>
@@ -443,8 +443,8 @@ We describe <a href="#passing-functions-to-spark">passing functions to Spark</a>
 
     <p>Parallelized collections are created by calling <code>SparkContext</code>&#8217;s <code>parallelize</code> method on an existing iterable or collection in your driver program. The elements of the collection are copied to form a distributed dataset that can be operated on in parallel. For example, here is how to create a parallelized collection holding the numbers 1 to 5:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">data</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">]</span>
-<span class="n">distData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="n">data</span><span class="p">)</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">data</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">]</span>
+<span class="n">distData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="n">data</span><span class="p">)</span></code></pre></figure>
 
     <p>Once created, the distributed dataset (<code>distData</code>) can be operated on in parallel. For example, we can call <code>distData.reduce(lambda a, b: a + b)</code> to add up the elements of the list.
 We describe operations on distributed datasets later on.</p>
@@ -465,8 +465,8 @@ We describe operations on distributed datasets later on.</p>
 
     <p>Text file RDDs can be created using <code>SparkContext</code>&#8217;s <code>textFile</code> method. This method takes a URI for the file (either a local path on the machine, or a <code>hdfs://</code>, <code>s3n://</code>, etc URI) and reads it as a collection of lines. Here is an example invocation:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">distFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
-<span class="n">distFile</span><span class="k">:</span> <span class="kt">org.apache.spark.rdd.RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="n">data</span><span class="o">.</span><span class="n">txt</span> <span class="nc">MapPartitionsRDD</span><span class="o">[</span><span class="err">10</span><span class="o">]</span> <span class="n">at</span> <span class="n">textFile</span> <span class="n">at</span> <span class="o">&lt;</span><span class="n">console</span><span class="k">&gt;:</span><span class="mi">26</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">distFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
+<span class="n">distFile</span><span class="k">:</span> <span class="kt">org.apache.spark.rdd.RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="n">data</span><span class="o">.</span><span class="n">txt</span> <span class="nc">MapPartitionsRDD</span><span class="o">[</span><span class="err">10</span><span class="o">]</span> <span class="n">at</span> <span class="n">textFile</span> <span class="n">at</span> <span class="o">&lt;</span><span class="n">console</span><span class="k">&gt;:</span><span class="mi">26</span></code></pre></figure>
 
     <p>Once created, <code>distFile</code> can be acted on by dataset operations. For example, we can add up the sizes of all the lines using the <code>map</code> and <code>reduce</code> operations as follows: <code>distFile.map(s =&gt; s.length).reduce((a, b) =&gt; a + b)</code>.</p>
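    For illustration, here is the map/reduce expression from the paragraph above written out as a sketch; <code>textFile</code> also takes an optional second argument requesting a minimum number of partitions.

        val distFile = sc.textFile("data.txt", 4)   // ask for at least 4 partitions
        val totalBytes = distFile.map(s => s.length).reduce((a, b) => a + b)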
 
@@ -509,7 +509,7 @@ We describe operations on distributed datasets later on.</p>
 
     <p>Text file RDDs can be created using <code>SparkContext</code>&#8217;s <code>textFile</code> method. This method takes a URI for the file (either a local path on the machine, or a <code>hdfs://</code>, <code>s3n://</code>, etc URI) and reads it as a collection of lines. Here is an example invocation:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">distFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">distFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span></code></pre></figure>
 
     <p>Once created, <code>distFile</code> can be acted on by dataset operations. For example, we can add up the sizes of all the lines using the <code>map</code> and <code>reduce</code> operations as follows: <code>distFile.map(s -&gt; s.length()).reduce((a, b) -&gt; a + b)</code>.</p>
 
@@ -552,7 +552,7 @@ We describe operations on distributed datasets later on.</p>
 
     <p>Text file RDDs can be created using <code>SparkContext</code>&#8217;s <code>textFile</code> method. This method takes a URI for the file (either a local path on the machine, or a <code>hdfs://</code>, <code>s3n://</code>, etc URI) and reads it as a collection of lines. Here is an example invocation:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">distFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;data.txt&quot;</span><span class="p">)</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="o">&gt;&gt;&gt;</span> <span class="n">distFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;data.txt&quot;</span><span class="p">)</span></code></pre></figure>
 
     <p>Once created, <code>distFile</code> can be acted on by dataset operations. For example, we can add up the sizes of all the lines using the <code>map</code> and <code>reduce</code> operations as follows: <code>distFile.map(lambda s: len(s)).reduce(lambda a, b: a + b)</code>.</p>
 
@@ -615,10 +615,10 @@ Python <code>array.array</code> for arrays of primitive types, users need to spe
     <p>Similarly to text files, SequenceFiles can be saved and loaded by specifying the path. The key and value
 classes can be specified, but for standard Writables this is not required.</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">4</span><span class="p">))</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="s">&quot;a&quot;</span> <span class="o">*</span> <span class="n">x</span><span class="p">))</span>
-<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span><span class="o">.</span><span class="n">saveAsSequenceFile</span><span class="p">(</span><span class="s">&quot;path/to/file&quot;</span><span class="p">)</span>
-<span class="o">&gt;&gt;&gt;</span> <span class="nb">sorted</span><span class="p">(</span><span class="n">sc</span><span class="o">.</span><span class="n">sequenceFile</span><span class="p">(</span><span class="s">&quot;path/to/file&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">collect</span><span class="p">())</span>
-<span class="p">[(</span><span class="mi">1</span><span class="p">,</span> <span class="s">u&#39;a&#39;</span><span class="p">),</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="s">u&#39;aa&#39;</span><span class="p">),</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="s">u&#39;aaa&#39;</span><span class="p">)]</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">4</span><span class="p">))</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="s2">&quot;a&quot;</span> <span class="o">*</span> <span class="n">x</span><span class="p">))</span>
+<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span><span class="o">.</span><span class="n">saveAsSequenceFile</span><span class="p">(</span><span class="s2">&quot;path/to/file&quot;</span><span class="p">)</span>
+<span class="o">&gt;&gt;&gt;</span> <span class="nb">sorted</span><span class="p">(</span><span class="n">sc</span><span class="o">.</span><span class="n">sequenceFile</span><span class="p">(</span><span class="s2">&quot;path/to/file&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">collect</span><span class="p">())</span>
+<span class="p">[(</span><span class="mi">1</span><span class="p">,</span> <span class="sa">u</span><span class="s1">&#39;a&#39;</span><span class="p">),</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="sa">u</span><span class="s1">&#39;aa&#39;</span><span class="p">),</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="sa">u</span><span class="s1">&#39;aaa&#39;</span><span class="p">)]</span></code></pre></figure>
 
     <p><strong>Saving and Loading Other Hadoop Input/Output Formats</strong></p>
 
@@ -626,17 +626,17 @@ classes can be specified, but for standard Writables this is not required.</p>
 If required, a Hadoop configuration can be passed in as a Python dict. Here is an example using the
 Elasticsearch ESInputFormat:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="err">$</span> <span class="n">SPARK_CLASSPATH</span><span class="o">=/</span><span class="n">path</span><span class="o">/</span><span class="n">to</span><span class="o">/</span><span class="n">elasticsearch</span><span class="o">-</span><span class="n">hadoop</span><span class="o">.</span><span class="n">jar</span> <span class="o">./</span><span class="nb">bin</span><span class="o">/</span><span class="n">pyspark</span>
-<span class="o">&gt;&gt;&gt;</span> <span class="n">conf</span> <span class="o">=</span> <span class="p">{</span><span class="s">&quot;es.resource&quot;</span> <span class="p">:</span> <span class="s">&quot;index/type&quot;</span><span class="p">}</span>  <span class="c"># assume Elasticsearch is running on localhost defaults</span>
-<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">newAPIHadoopRDD</span><span class="p">(</span><span class="s">&quot;org.elasticsearch.hadoop.mr.EsInputFormat&quot;</span><span class="p">,</span>
-                             <span class="s">&quot;org.apache.hadoop.io.NullWritable&quot;</span><span class="p">,</span>
-                             <span class="s">&quot;org.elasticsearch.hadoop.mr.LinkedMapWritable&quot;</span><span class="p">,</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="err">$</span> <span class="n">SPARK_CLASSPATH</span><span class="o">=/</span><span class="n">path</span><span class="o">/</span><span class="n">to</span><span class="o">/</span><span class="n">elasticsearch</span><span class="o">-</span><span class="n">hadoop</span><span class="o">.</span><span class="n">jar</span> <span class="o">./</span><span class="nb">bin</span><span class="o">/</span><span class="n">pyspark</span>
+<span class="o">&gt;&gt;&gt;</span> <span class="n">conf</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;es.resource&quot;</span> <span class="p">:</span> <span class="s2">&quot;index/type&quot;</span><span class="p">}</span>  <span class="c1"># assume Elasticsearch is running on localhost defaults</span>
+<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">newAPIHadoopRDD</span><span class="p">(</span><span class="s2">&quot;org.elasticsearch.hadoop.mr.EsInputFormat&quot;</span><span class="p">,</span>
+                             <span class="s2">&quot;org.apache.hadoop.io.NullWritable&quot;</span><span class="p">,</span>
+                             <span class="s2">&quot;org.elasticsearch.hadoop.mr.LinkedMapWritable&quot;</span><span class="p">,</span>
                              <span class="n">conf</span><span class="o">=</span><span class="n">conf</span><span class="p">)</span>
-<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span><span class="o">.</span><span class="n">first</span><span class="p">()</span>  <span class="c"># the result is a MapWritable that is converted to a Python dict</span>
-<span class="p">(</span><span class="s">u&#39;Elasticsearch ID&#39;</span><span class="p">,</span>
- <span class="p">{</span><span class="s">u&#39;field1&#39;</span><span class="p">:</span> <span class="bp">True</span><span class="p">,</span>
-  <span class="s">u&#39;field2&#39;</span><span class="p">:</span> <span class="s">u&#39;Some Text&#39;</span><span class="p">,</span>
-  <span class="s">u&#39;field3&#39;</span><span class="p">:</span> <span class="mi">12345</span><span class="p">})</span></code></pre></div>
+<span class="o">&gt;&gt;&gt;</span> <span class="n">rdd</span><span class="o">.</span><span class="n">first</span><span class="p">()</span>  <span class="c1"># the result is a MapWritable that is converted to a Python dict</span>
+<span class="p">(</span><span class="sa">u</span><span class="s1">&#39;Elasticsearch ID&#39;</span><span class="p">,</span>
+ <span class="p">{</span><span class="sa">u</span><span class="s1">&#39;field1&#39;</span><span class="p">:</span> <span class="bp">True</span><span class="p">,</span>
+  <span class="sa">u</span><span class="s1">&#39;field2&#39;</span><span class="p">:</span> <span class="sa">u</span><span class="s1">&#39;Some Text&#39;</span><span class="p">,</span>
+  <span class="sa">u</span><span class="s1">&#39;field3&#39;</span><span class="p">:</span> <span class="mi">12345</span><span class="p">})</span></code></pre></figure>
 
     <p>Note that, if the InputFormat simply depends on a Hadoop configuration and/or input path, and
 the key and value classes can easily be converted according to the above table,
@@ -672,9 +672,9 @@ for examples of using Cassandra / HBase <code>InputFormat</code> and <code>Outpu
 
     <p>To illustrate RDD basics, consider the simple program below:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
 <span class="k">val</span> <span class="n">lineLengths</span> <span class="k">=</span> <span class="n">lines</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">s</span> <span class="k">=&gt;</span> <span class="n">s</span><span class="o">.</span><span class="n">length</span><span class="o">)</span>
-<span class="k">val</span> <span class="n">totalLength</span> <span class="k">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="n">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">)</span></code></pre></div>
+<span class="k">val</span> <span class="n">totalLength</span> <span class="k">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="n">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">)</span></code></pre></figure>
 
     <p>The first line defines a base RDD from an external file. This dataset is not loaded in memory or
 otherwise acted on: <code>lines</code> is merely a pointer to the file.
@@ -686,7 +686,7 @@ returning only its answer to the driver program.</p>
 
     <p>If we also wanted to use <code>lineLengths</code> again later, we could add:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">lineLengths</span><span class="o">.</span><span class="n">persist</span><span class="o">()</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="n">lineLengths</span><span class="o">.</span><span class="n">persist</span><span class="o">()</span></code></pre></figure>
 
     <p>before the <code>reduce</code>, which would cause <code>lineLengths</code> to be saved in memory after the first time it is computed.</p>
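    Pulling the Scala example together, a hedged sketch: <code>persist()</code> with no arguments uses the default MEMORY_ONLY level, and an explicit <code>StorageLevel</code> such as MEMORY_AND_DISK can be passed instead.

        import org.apache.spark.storage.StorageLevel

        val lines = sc.textFile("data.txt")                    // lazy: no I/O yet
        val lineLengths = lines.map(s => s.length)             // lazy transformation
        lineLengths.persist(StorageLevel.MEMORY_AND_DISK)      // keep it around once computed
        val totalLength = lineLengths.reduce((a, b) => a + b)  // first action computes and caches
        val maxLength = lineLengths.reduce((a, b) => math.max(a, b))  // served from the cache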
 
@@ -696,9 +696,9 @@ returning only its answer to the driver program.</p>
 
     <p>To illustrate RDD basics, consider the simple program below:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">lineLengths</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="n">s</span> <span class="o">-&gt;</span> <span class="n">s</span><span class="o">.</span><span class="na">length</span><span class="o">());</span>
-<span class="kt">int</span> <span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="na">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="o">-&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">);</span></code></pre></div>
+<span class="kt">int</span> <span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="na">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="o">-&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">);</span></code></pre></figure>
 
     <p>The first line defines a base RDD from an external file. This dataset is not loaded in memory or
 otherwise acted on: <code>lines</code> is merely a pointer to the file.
@@ -710,7 +710,7 @@ returning only its answer to the driver program.</p>
 
     <p>If we also wanted to use <code>lineLengths</code> again later, we could add:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">lineLengths</span><span class="o">.</span><span class="na">persist</span><span class="o">(</span><span class="n">StorageLevel</span><span class="o">.</span><span class="na">MEMORY_ONLY</span><span class="o">());</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">lineLengths</span><span class="o">.</span><span class="na">persist</span><span class="o">(</span><span class="n">StorageLevel</span><span class="o">.</span><span class="na">MEMORY_ONLY</span><span class="o">());</span></code></pre></figure>
 
     <p>before the <code>reduce</code>, which would cause <code>lineLengths</code> to be saved in memory after the first time it is computed.</p>
 
@@ -720,9 +720,9 @@ returning only its answer to the driver program.</p>
 
     <p>To illustrate RDD basics, consider the simple program below:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;data.txt&quot;</span><span class="p">)</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;data.txt&quot;</span><span class="p">)</span>
 <span class="n">lineLengths</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">s</span><span class="p">))</span>
-<span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span></code></pre></div>
+<span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span></code></pre></figure>
 
     <p>The first line defines a base RDD from an external file. This dataset is not loaded in memory or
 otherwise acted on: <code>lines</code> is merely a pointer to the file.
@@ -734,7 +734,7 @@ returning only its answer to the driver program.</p>
 
     <p>If we also wanted to use <code>lineLengths</code> again later, we could add:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">lineLengths</span><span class="o">.</span><span class="n">persist</span><span class="p">()</span></code></pre></div>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">lineLengths</span><span class="o">.</span><span class="n">persist</span><span class="p">()</span></code></pre></figure>
 
     <p>before the <code>reduce</code>, which would cause <code>lineLengths</code> to be saved in memory after the first time it is computed.</p>
 
@@ -758,20 +758,20 @@ which can be used for short pieces of code.</li>
 pass <code>MyFunctions.func1</code>, as follows:</li>
     </ul>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">object</span> <span class="nc">MyFunctions</span> <span class="o">{</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">object</span> <span class="nc">MyFunctions</span> <span class="o">{</span>
   <span class="k">def</span> <span class="n">func1</span><span class="o">(</span><span class="n">s</span><span class="k">:</span> <span class="kt">String</span><span class="o">)</span><span class="k">:</span> <span class="kt">String</span> <span class="o">=</span> <span class="o">{</span> <span class="o">...</span> <span class="o">}</span>
 <span class="o">}</span>
 
-<span class="n">myRdd</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="nc">MyFunctions</span><span class="o">.</span><span class="n">func1</span><span class="o">)</span></code></pre></div>
+<span class="n">myRdd</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="nc">MyFunctions</span><span class="o">.</span><span class="n">func1</span><span class="o">)</span></code></pre></figure>
 
     <p>Note that while it is also possible to pass a reference to a method in a class instance (as opposed to
 a singleton object), this requires sending the object that contains that class along with the method.
 For example, consider:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">class</span> <span class="nc">MyClass</span> <span class="o">{</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">class</span> <span class="nc">MyClass</span> <span class="o">{</span>
   <span class="k">def</span> <span class="n">func1</span><span class="o">(</span><span class="n">s</span><span class="k">:</span> <span class="kt">String</span><span class="o">)</span><span class="k">:</span> <span class="kt">String</span> <span class="o">=</span> <span class="o">{</span> <span class="o">...</span> <span class="o">}</span>
   <span class="k">def</span> <span class="n">doStuff</span><span class="o">(</span><span class="n">rdd</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">{</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">func1</span><span class="o">)</span> <span class="o">}</span>
-<span class="o">}</span></code></pre></div>
+<span class="o">}</span></code></pre></figure>
 
     <p>Here, if we create a new <code>MyClass</code> instance and call <code>doStuff</code> on it, the <code>map</code> inside there references the
 <code>func1</code> method <em>of that <code>MyClass</code> instance</em>, so the whole object needs to be sent to the cluster. It is
@@ -779,18 +779,18 @@ similar to writing <code>rdd.map(x =&gt; this.func1(x))</code>.</p>
 
     <p>In a similar way, accessing fields of the outer object will reference the whole object:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">class</span> <span class="nc">MyClass</span> <span class="o">{</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">class</span> <span class="nc">MyClass</span> <span class="o">{</span>
   <span class="k">val</span> <span class="n">field</span> <span class="k">=</span> <span class="s">&quot;Hello&quot;</span>
   <span class="k">def</span> <span class="n">doStuff</span><span class="o">(</span><span class="n">rdd</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">{</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">x</span> <span class="k">=&gt;</span> <span class="n">field</span> <span class="o">+</span> <span class="n">x</span><span class="o">)</span> <span class="o">}</span>
-<span class="o">}</span></code></pre></div>
+<span class="o">}</span></code></pre></figure>
 
     <p>is equivalent to writing <code>rdd.map(x =&gt; this.field + x)</code>, which references all of <code>this</code>. To avoid this
 issue, the simplest way is to copy <code>field</code> into a local variable instead of accessing it externally:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">def</span> <span class="n">doStuff</span><span class="o">(</span><span class="n">rdd</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">{</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">def</span> <span class="n">doStuff</span><span class="o">(</span><span class="n">rdd</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">{</span>
   <span class="k">val</span> <span class="n">field_</span> <span class="k">=</span> <span class="k">this</span><span class="o">.</span><span class="n">field</span>
   <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">x</span> <span class="k">=&gt;</span> <span class="n">field_</span> <span class="o">+</span> <span class="n">x</span><span class="o">)</span>
-<span class="o">}</span></code></pre></div>
+<span class="o">}</span></code></pre></figure>
 
   </div>
 
@@ -811,17 +811,17 @@ to concisely define an implementation.</li>
     <p>While much of this guide uses lambda syntax for conciseness, it is easy to use all the same APIs
 in long-form. For example, we could have written our code above as follows:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">lineLengths</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="k">new</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Integer</span><span class="o">&gt;()</span> <span class="o">{</span>
   <span class="kd">public</span> <span class="n">Integer</span> <span class="nf">call</span><span class="o">(</span><span class="n">String</span> <span class="n">s</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">s</span><span class="o">.</span><span class="na">length</span><span class="o">();</span> <span class="o">}</span>
 <span class="o">});</span>
 <span class="kt">int</span> <span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="na">reduce</span><span class="o">(</span><span class="k">new</span> <span class="n">Function2</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">,</span> <span class="n">Integer</span><span class="o">,</span> <span class="n">Integer</span><span class="o">&gt;()</span> <span class="o">{</span>
   <span class="kd">public</span> <span class="n">Integer</span> <span class="nf">call</span><span class="o">(</span><span class="n">Integer</span> <span class="n">a</span><span class="o">,</span> <span class="n">Integer</span> <span class="n">b</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">;</span> <span class="o">}</span>
-<span class="o">});</span></code></pre></div>
+<span class="o">});</span></code></pre></figure>
 
     <p>Or, if writing the functions inline is unwieldy:</p>
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kd">class</span> <span class="nc">GetLength</span> <span class="kd">implements</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Integer</span><span class="o">&gt;</span> <span class="o">{</span>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="kd">class</span> <span class="nc">GetLength</span> <span class="kd">implements</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Integer</span><span class="o">&gt;</span> <span class="o">{</span>
   <span class="kd">public</span> <span class="n">Integer</span> <span class="nf">call</span><span class="o">(</span><span class="n">String</span> <span class="n">s</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">s</span><span class="o">.</span><span class="na">length</span><span class="o">();</span> <span class="o">}</span>
 <span class="o">}</span>
 <span class="kd">class</span> <span class="nc">Sum</span> <span class="kd">implements</span> <span class="n">Function2</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">,</span> <span class="n">Integer</span><span class="o">,</span> <span class="n">Integer</span><span class="o">&gt;</span> <span class="o">{</span>
@@ -829,8 +829,8 @@ in long-form. For example, we could have written our code above as follows:</p>
 <span class="o">}</span>
 
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
-<span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">lineLengths</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="k">new</span> <span class="nf">GetLength</span><span class="o">());</span>
-<span class="kt">int</span> <span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="na">reduce</span><span class="o">(</span><span class="k">new</span> <span class="nf">Sum</span><span class="o">());</span></code></pre></div>
+<span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">lineLengths</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="k">new</span> <span class="n">GetLength</span><span class="o">());</span>
+<span class="kt">int</span> <span class="n">totalLength</span> <span class="o">=</span> <span class="n">lineLengths</span><span class="o">.</span><span class="na">reduce</span><span class="o">(</span><span class="k">new</span> <span class="n">Sum</span><span class="o">());</span></code></pre></figure>
 
     <p>Note that anonymous inner classes in Java can also access variables in the enclosing scope as long
 as they are marked <code>final</code>. Spark will ship copies of these variables to each worker node as it does
@@ -854,42 +854,42 @@ functions or statements that do not return a value.)</li>
     <p>For example, to pass a longer function than can be supported using a <code>lambda</code>, consider
 the code below:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="sd">&quot;&quot;&quot;MyScript.py&quot;&quot;&quot;</span>
-<span class="k">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="sd">&quot;&quot;&quot;MyScript.py&quot;&quot;&quot;</span>
+<span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s2">&quot;__main__&quot;</span><span class="p">:</span>
     <span class="k">def</span> <span class="nf">myFunc</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
-        <span class="n">words</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s">&quot; &quot;</span><span class="p">)</span>
+        <span class="n">words</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">&quot; &quot;</span><span class="p">)</span>
         <span class="k">return</span> <span class="nb">len</span><span class="p">(</span><span class="n">words</span><span class="p">)</span>
 
     <span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
-    <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;file.txt&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="n">myFunc</span><span class="p">)</span></code></pre></div>
+    <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;file.txt&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="n">myFunc</span><span class="p">)</span></code></pre></figure>
 
     <p>Note that while it is also possible to pass a reference to a method in a class instance (as opposed to
a singleton object), this requires sending the object that contains that method along with it.
 For example, consider:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="k">class</span> <span class="nc">MyClass</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="k">class</span> <span class="nc">MyClass</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
     <span class="k">def</span> <span class="nf">func</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">s</span><span class="p">):</span>
         <span class="k">return</span> <span class="n">s</span>
     <span class="k">def</span> <span class="nf">doStuff</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rdd</span><span class="p">):</span>
-        <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">func</span><span class="p">)</span></code></pre></div>
+        <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">func</span><span class="p">)</span></code></pre></figure>
 
     <p>Here, if we create a <code>new MyClass</code> and call <code>doStuff</code> on it, the <code>map</code> inside there references the
 <code>func</code> method <em>of that <code>MyClass</code> instance</em>, so the whole object needs to be sent to the cluster.</p>
 
     <p>In a similar way, accessing fields of the outer object will reference the whole object:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="k">class</span> <span class="nc">MyClass</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
-    <span class="k">def</span> <span class="nf">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
-        <span class="bp">self</span><span class="o">.</span><span class="n">field</span> <span class="o">=</span> <span class="s">&quot;Hello&quot;</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="k">class</span> <span class="nc">MyClass</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
+    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
+        <span class="bp">self</span><span class="o">.</span><span class="n">field</span> <span class="o">=</span> <span class="s2">&quot;Hello&quot;</span>
     <span class="k">def</span> <span class="nf">doStuff</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rdd</span><span class="p">):</span>
-        <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">field</span> <span class="o">+</span> <span class="n">s</span><span class="p">)</span></code></pre></div>
+        <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">field</span> <span class="o">+</span> <span class="n">s</span><span class="p">)</span></code></pre></figure>
 
     <p>To avoid this issue, the simplest way is to copy <code>field</code> into a local variable instead
 of accessing it externally:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="k">def</span> <span class="nf">doStuff</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rdd</span><span class="p">):</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="k">def</span> <span class="nf">doStuff</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rdd</span><span class="p">):</span>
     <span class="n">field</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">field</span>
-    <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="n">field</span> <span class="o">+</span> <span class="n">s</span><span class="p">)</span></code></pre></div>
+    <span class="k">return</span> <span class="n">rdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="n">field</span> <span class="o">+</span> <span class="n">s</span><span class="p">)</span></code></pre></figure>
 
   </div>
 
@@ -906,40 +906,40 @@ of accessing it externally:</p>
 
 <div data-lang="scala">
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">var</span> <span class="n">counter</span> <span class="k">=</span> <span class="mi">0</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">var</span> <span class="n">counter</span> <span class="k">=</span> <span class="mi">0</span>
 <span class="k">var</span> <span class="n">rdd</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">)</span>
 
 <span class="c1">// Wrong: Don&#39;t do this!!</span>
 <span class="n">rdd</span><span class="o">.</span><span class="n">foreach</span><span class="o">(</span><span class="n">x</span> <span class="k">=&gt;</span> <span class="n">counter</span> <span class="o">+=</span> <span class="n">x</span><span class="o">)</span>
 
-<span class="n">println</span><span class="o">(</span><span class="s">&quot;Counter value: &quot;</span> <span class="o">+</span> <span class="n">counter</span><span class="o">)</span></code></pre></div>
+<span class="n">println</span><span class="o">(</span><span class="s">&quot;Counter value: &quot;</span> <span class="o">+</span> <span class="n">counter</span><span class="o">)</span></code></pre></figure>
 
   </div>
 
 <div data-lang="java">
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kt">int</span> <span class="n">counter</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span>
+    <figure class="highlight"><pre><code class="language-java" data-lang="java"><span></span><span class="kt">int</span> <span class="n">counter</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span>
 <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">Integer</span><span class="o">&gt;</span> <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">parallelize</span><span class="o">(</span><span class="n">data</span><span class="o">);</span>
 
 <span class="c1">// Wrong: Don&#39;t do this!!</span>
 <span class="n">rdd</span><span class="o">.</span><span class="na">foreach</span><span class="o">(</span><span class="n">x</span> <span class="o">-&gt;</span> <span class="n">counter</span> <span class="o">+=</span> <span class="n">x</span><span class="o">);</span>
 
-<span class="n">println</span><span class="o">(</span><span class="s">&quot;Counter value: &quot;</span> <span class="o">+</span> <span class="n">counter</span><span class="o">);</span></code></pre></div>
+<span class="n">println</span><span class="o">(</span><span class="s">&quot;Counter value: &quot;</span> <span class="o">+</span> <span class="n">counter</span><span class="o">);</span></code></pre></figure>
 
   </div>
 
 <div data-lang="python">
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">counter</span> <span class="o">=</span> <span class="mi">0</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">counter</span> <span class="o">=</span> <span class="mi">0</span>
 <span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="n">data</span><span class="p">)</span>
 
-<span class="c"># Wrong: Don&#39;t do this!!</span>
+<span class="c1"># Wrong: Don&#39;t do this!!</span>
 <span class="k">def</span> <span class="nf">increment_counter</span><span class="p">(</span><span class="n">x</span><span class="p">):</span>
     <span class="k">global</span> <span class="n">counter</span>
     <span class="n">counter</span> <span class="o">+=</span> <span class="n">x</span>
 <span class="n">rdd</span><span class="o">.</span><span class="n">foreach</span><span class="p">(</span><span class="n">increment_counter</span><span class="p">)</span>
 
-<span class="k">print</span><span class="p">(</span><span class="s">&quot;Counter value: &quot;</span><span class="p">,</span> <span class="n">counter</span><span class="p">)</span></code></pre></div>
+<span class="k">print</span><span class="p">(</span><span class="s2">&quot;Counter value: &quot;</span><span class="p">,</span> <span class="n">counter</span><span class="p">)</span></code></pre></figure>
 
   </div>
 
@@ -953,7 +953,7 @@ of accessing it externally:</p>
 
 <p>In local mode, in some circumstances the <code>foreach</code> function will actually execute within the same JVM as the driver and will reference the same original <strong>counter</strong>, and may actually update it.</p>
 
-<p>To ensure well-defined behavior in these sorts of scenarios one should use an <a href="#accumulators"><code>Accumulator</code></a>. Accumulators in Spark are used specifically to provide a mechanism for safely updating a variable when execution is split up across worker nodes in a cluster. The Accumulators section of this guide discusses these in more detail.</p>
+<p>To ensure well-defined behavior in these sorts of scenarios one should use an <a href="#accumulators"><code>Accumulator</code></a>. Accumulators in Spark are used specifically to provide a mechanism for safely updating a variable when execution is split up across worker nodes in a cluster. The Accumulators section of this guide discusses these in more detail.  </p>
 
 <p>In general, closures - constructs like loops or locally defined methods, should not be used to mutate some global state. Spark does not define or guarantee the behavior of mutations to objects referenced from outside of closures. Some code that does this may work in local mode, but that&#8217;s just by accident and such code will not behave as expected in distributed mode. Use an Accumulator instead if some global aggregation is needed.</p>
 
@@ -980,9 +980,9 @@ which automatically wraps around an RDD of tuples.</p>
     <p>For example, the following code uses the <code>reduceByKey</code> operation on key-value pairs to count how
 many times each line of text occurs in a file:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="k">val</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">)</span>
 <span class="k">val</span> <span class="n">pairs</span> <span class="k">=</span> <span class="n">lines</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">s</span> <span class="k">=&gt;</span> <span class="o">(</span><span class="n">s</span><span class="o">,</span> <span class="mi">1</span><span class="o">))</span>
-<span class="k">val</span> <span class="n">counts</span> <span class="k">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">)</span></code></pre></div>
+<span class="k">val</span> <span class="n">counts</span> <span class="k">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">)</span></code></pre></figure>
 
     <p>We could also use <code>counts.sortByKey()</code>, for example, to sort the pairs alphabetically, and finally
 <code>counts.collect()</code> to bring them back to the driver program as an array of objects.</p>
@@ -1015,9 +1015,9 @@ key-value ones.</p>
     <p>For example, the following code uses the <code>reduceByKey</code> operation on key-value pairs to count how
 many times each line of text occurs in a file:</p>
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">lines</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;data.txt&quot;</span><span class="o">);</span>
 <span class="nc">JavaPairRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">,</span> <span class="nc">Integer</span><span class="o">&gt;</span> <span class="n">pairs</span> <span class="k">=</span> <span class="n">lines</span><span class="o">.</span><span class="n">mapToPair</span><span class="o">(</span><span class="n">s</span> <span class="o">-&gt;</span> <span class="k">new</span> <span class="nc">Tuple2</span><span class="o">(</span><span class="n">s</span><span class="o">,</span> <span class="mi">1</span><span class="o">));</span>
-<span class="nc">JavaPairRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">,</span> <span class="nc">Integer</span><span class="o">&gt;</span> <span class="n">counts</span> <span class="k">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="o">-&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">);</span></code></pre></div>
+<span class="nc">JavaPairRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">,</span> <span class="nc">Integer</span><span class="o">&gt;</span> <span class="n">counts</span> <span class="k">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="o">-&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">);</span></code></pre></figure>
 
     <p>We could also use <code>counts.sortByKey()</code>, for example, to sort the pairs alphabetically, and finally
 <code>counts.collect()</code> to bring them back to the driver program as an array of objects.</p>
@@ -1042,9 +1042,9 @@ Simply create such tuples and then call your desired operation.</p>
     <p>For example, the following code uses the <code>reduceByKey</code> operation on key-value pairs to count how
 many times each line of text occurs in a file:</p>
 
-    <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;data.txt&quot;</span><span class="p">)</span>
+    <figure class="highlight"><pre><code class="language-python" data-lang="python"><span></span><span class="n">lines</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s2">&quot;data.txt&quot;</span><span class="p">)</span>
 <span class="n">pairs</span> <span class="o">=</span> <span class="n">lines</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
-<span class="n">counts</span> <span class="o">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span></code></pre></div>
+<span class="n">counts</span> <span class="o">=</span> <span class="n">pairs</span><span class="o">.</span><span class="n">reduceByKey</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="p">)</span></code></pre></figure>
 
     <p>We could also use <code>counts.sortByKey()</code>, for example, to sort the pairs alphabetically, and finally
 <code>counts.collect()</code> to bring them back to the driver program as a list of objects.</p>
@@ -1435,30 +1435,30 @@ method. The code below shows this:</p>
 
 <div data-lang="scala">
 
-    <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">broadcastVar</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">broadcast</span><span class="o">(</span><span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">))</span>
+    <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span></span><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">broadcastVar</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">broadcast</span><span class="o">(</span><span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">))</span>
 <span class="n">broadcastVar</span><span class="k">:</span> <span class="kt">org.apache.spark.broadcast.Broadcast</span><span class="o">[</span><span class="kt">Array</span><span class="o">[</span><span class="kt">Int</span><span class="o">]]</span> <span class="k">=</span> <span class="nc">Broadcast</span><span class="o">(</span><span class="mi">0</span><span class="o">)</span>
 
 <span class="n">scala</span><span class="o">&gt;</span> <span class="n">broadcastVar</span><span class="o">.</span><span class="n">value</span>
-<span class="n">res0</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[</span><span class="kt">Int</span><span class="o">]</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">)</span></code></pre></div>
+<span class="n">res0</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[</span><span class="kt">Int</span><span class="o">]</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">(</span><span class="mi">1</span><span class="o">,</span> <span class="mi">2</span><span class="o">,</span> <span class="mi">3</span><span class="o">)</span></code></pre></figure>
 
   </div>
 
 <div data-lang="java">
 
-    <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="n">Broadcast</span><span class="o">&lt;</span><span class="kt">int</span><span class="o">[]&gt;</span> <span class="n">broadcastVar</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">broadcast</span><span class="o">(</span><span class="k">new<

<TRUNCATED>
