Posted to commits@spark.apache.org by gu...@apache.org on 2018/07/03 18:12:26 UTC

[6/6] spark-website git commit: Fix signature description broken in PySpark API documentation in 2.1.3

Fix signature description broken in PySpark API documentation in 2.1.3
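A note on the bulk of this diff: the doctest outputs below lose their u'' prefixes because the 2.1.3 docs appear to have been regenerated under Python 3, where str is unicode and reprs carry no prefix. A minimal, self-contained sketch of the difference (plain Python, no Spark needed; the application id value is made up for illustration):

    text = 'local-1433865536131'   # hypothetical application id
    print(repr(text))              # Python 3 prints: 'local-1433865536131'
    # Under Python 2 the same value, held as unicode, would repr as:
    # u'local-1433865536131'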


Project: http://git-wip-us.apache.org/repos/asf/spark-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark-website/commit/da71a5c1
Tree: http://git-wip-us.apache.org/repos/asf/spark-website/tree/da71a5c1
Diff: http://git-wip-us.apache.org/repos/asf/spark-website/diff/da71a5c1

Branch: refs/heads/asf-site
Commit: da71a5c1d80c963901b0f15c64ef18ee5b0a0bd8
Parents: 6bbac49
Author: hyukjinkwon <gu...@apache.org>
Authored: Tue Jul 3 02:08:45 2018 +0800
Committer: hyukjinkwon <gu...@apache.org>
Committed: Wed Jul 4 02:12:13 2018 +0800

----------------------------------------------------------------------
 site/docs/2.1.3/api/python/pyspark.html         |  22 +-
 site/docs/2.1.3/api/python/pyspark.ml.html      | 144 +++++------
 site/docs/2.1.3/api/python/pyspark.mllib.html   |  28 +--
 site/docs/2.1.3/api/python/pyspark.sql.html     | 248 +++++++++----------
 .../2.1.3/api/python/pyspark.streaming.html     |   3 +-
 site/docs/2.1.3/api/python/searchindex.js       |   2 +-
 6 files changed, 224 insertions(+), 223 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark-website/blob/da71a5c1/site/docs/2.1.3/api/python/pyspark.html
----------------------------------------------------------------------
diff --git a/site/docs/2.1.3/api/python/pyspark.html b/site/docs/2.1.3/api/python/pyspark.html
index 18248bc..068ac7d 100644
--- a/site/docs/2.1.3/api/python/pyspark.html
+++ b/site/docs/2.1.3/api/python/pyspark.html
@@ -259,7 +259,7 @@ Its format depends on the scheduler implementation.</p>
 <li>in case of YARN something like ‘application_1433865536131_34483’</li>
 </ul>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">sc</span><span class="o">.</span><span class="n">applicationId</span>  
-<span class="go">u&#39;local-...&#39;</span>
+<span class="go">&#39;local-...&#39;</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -738,7 +738,7 @@ Spark 1.2)</p>
 <span class="gp">... </span>   <span class="n">_</span> <span class="o">=</span> <span class="n">testFile</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s2">&quot;Hello world!&quot;</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">textFile</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span>
-<span class="go">[u&#39;Hello world!&#39;]</span>
+<span class="go">[&#39;Hello world!&#39;]</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -761,10 +761,10 @@ serializer:</p>
 <span class="gp">... </span>   <span class="n">_</span> <span class="o">=</span> <span class="n">testFile</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s2">&quot;Hello&quot;</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">textFile</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span>
-<span class="go">[u&#39;Hello&#39;]</span>
+<span class="go">[&#39;Hello&#39;]</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">parallelized</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([</span><span class="s2">&quot;World!&quot;</span><span class="p">])</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="nb">sorted</span><span class="p">(</span><span class="n">sc</span><span class="o">.</span><span class="n">union</span><span class="p">([</span><span class="n">textFile</span><span class="p">,</span> <span class="n">parallelized</span><span class="p">])</span><span class="o">.</span><span class="n">collect</span><span class="p">())</span>
-<span class="go">[u&#39;Hello&#39;, &#39;World!&#39;]</span>
+<span class="go">[&#39;Hello&#39;, &#39;World!&#39;]</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -814,7 +814,7 @@ fully in memory.</p>
 <span class="gp">... </span>   <span class="n">_</span> <span class="o">=</span> <span class="n">file2</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s2">&quot;2&quot;</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">textFiles</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">wholeTextFiles</span><span class="p">(</span><span class="n">dirPath</span><span class="p">)</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="nb">sorted</span><span class="p">(</span><span class="n">textFiles</span><span class="o">.</span><span class="n">collect</span><span class="p">())</span>
-<span class="go">[(u&#39;.../1.txt&#39;, u&#39;1&#39;), (u&#39;.../2.txt&#39;, u&#39;2&#39;)]</span>
+<span class="go">[(&#39;.../1.txt&#39;, &#39;1&#39;), (&#39;.../2.txt&#39;, &#39;2&#39;)]</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -1666,7 +1666,7 @@ If no storage level is specified defaults to (<code class="xref py py-class docu
 <code class="descname">pipe</code><span class="sig-paren">(</span><em>command</em>, <em>env=None</em>, <em>checkCode=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/rdd.html#RDD.pipe"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.RDD.pipe" title="Permalink to this definition">¶</a></dt>
 <dd><p>Return an RDD created by piping elements to a forked external process.</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([</span><span class="s1">&#39;1&#39;</span><span class="p">,</span> <span class="s1">&#39;2&#39;</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="s1">&#39;3&#39;</span><span class="p">])</span><span class="o">.</span><span class="n">pipe</span><span class="p">(</span><span class="s1">&#39;cat&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span>
-<span class="go">[u&#39;1&#39;, u&#39;2&#39;, u&#39;&#39;, u&#39;3&#39;]</span>
+<span class="go">[&#39;1&#39;, &#39;2&#39;, &#39;&#39;, &#39;3&#39;]</span>
 </pre></div>
 </div>
 <table class="docutils field-list" frame="void" rules="none">
@@ -1781,7 +1781,7 @@ using <cite>coalesce</cite>, which can avoid performing a shuffle.</p>
 
 <dl class="method">
 <dt id="pyspark.RDD.repartitionAndSortWithinPartitions">
-<code class="descname">repartitionAndSortWithinPartitions</code><span class="sig-paren">(</span><em>numPartitions=None</em>, <em>partitionFunc=&lt;function portable_hash&gt;</em>, <em>ascending=True</em>, <em>keyfunc=&lt;function &lt;lambda&gt;&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/rdd.html#RDD.repartitionAndSortWithinPartitions"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.RDD.repartitionAndSortWithinPartitions" title="Permalink to this definition">¶</a></dt>
+<code class="descname">repartitionAndSortWithinPartitions</code><span class="sig-paren">(</span><em>numPartitions=None</em>, <em>partitionFunc=&lt;function portable_hash&gt;</em>, <em>ascending=True</em>, <em>keyfunc=&lt;function RDD.&lt;lambda&gt;&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/rdd.html#RDD.repartitionAndSortWithinPartitions"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.RDD.repartitionAndSortWithinPartitions" title="Permalink to this definition">¶</a></dt>
 <dd><p>Repartition the RDD according to the given partitioner and, within each resulting partition,
 sort records by their keys.</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">rdd</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">8</span><span class="p">),</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">6</span><span class="p">),</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">8</span><span class="p">),</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">8</span><span class="p">),</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p"
 >)])</span>
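The keyfunc default in the hunk above (and in the sortByKey hunk further down) now renders as &lt;function RDD.&lt;lambda&gt;&gt; rather than &lt;function &lt;lambda&gt;&gt;. A plausible explanation, sketched with the stdlib alone (the class and method here are illustrative stand-ins, not PySpark's real code): Python 3 function reprs use __qualname__, which includes the enclosing class of a lambda defined in a class body.

    import inspect

    class RDD:
        # illustrative stand-in for pyspark.RDD.sortByKey
        def sortByKey(self, ascending=True, numPartitions=None,
                      keyfunc=lambda x: x):
            return keyfunc

    sig = inspect.signature(RDD.sortByKey)
    print(sig.parameters['keyfunc'].default)
    # Python 3 prints something like: <function RDD.<lambda> at 0x...>
    # Python 2 printed only:          <function <lambda> at 0x...>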
@@ -2071,7 +2071,7 @@ RDD’s key and value types. The mechanism is as follows:</p>
 <span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">fileinput</span> <span class="k">import</span> <span class="nb">input</span><span class="p">,</span> <span class="n">hook_compressed</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">result</span> <span class="o">=</span> <span class="nb">sorted</span><span class="p">(</span><span class="nb">input</span><span class="p">(</span><span class="n">glob</span><span class="p">(</span><span class="n">tempFile3</span><span class="o">.</span><span class="n">name</span> <span class="o">+</span> <span class="s2">&quot;/part*.gz&quot;</span><span class="p">),</span> <span class="n">openhook</span><span class="o">=</span><span class="n">hook_compressed</span><span class="p">))</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="sa">b</span><span class="s1">&#39;&#39;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">result</span><span class="p">)</span><span class="o">.</span><span class="n">decode</span><span class="p">(</span><span class="s1">&#39;utf-8&#39;</span><span class="p">)</span>
-<span class="go">u&#39;bar\nfoo\n&#39;</span>
+<span class="go">&#39;bar\nfoo\n&#39;</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -2082,7 +2082,7 @@ RDD’s key and value types. The mechanism is as follows:</p>
 <dd><p>Assign a name to this RDD.</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">rdd1</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
 <span class="gp">&gt;&gt;&gt; </span><span class="n">rdd1</span><span class="o">.</span><span class="n">setName</span><span class="p">(</span><span class="s1">&#39;RDD1&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">name</span><span class="p">()</span>
-<span class="go">u&#39;RDD1&#39;</span>
+<span class="go">&#39;RDD1&#39;</span>
 </pre></div>
 </div>
 </dd></dl>
@@ -2102,7 +2102,7 @@ RDD’s key and value types. The mechanism is as follows:</p>
 
 <dl class="method">
 <dt id="pyspark.RDD.sortByKey">
-<code class="descname">sortByKey</code><span class="sig-paren">(</span><em>ascending=True</em>, <em>numPartitions=None</em>, <em>keyfunc=&lt;function &lt;lambda&gt;&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/rdd.html#RDD.sortByKey"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.RDD.sortByKey" title="Permalink to this definition">¶</a></dt>
+<code class="descname">sortByKey</code><span class="sig-paren">(</span><em>ascending=True</em>, <em>numPartitions=None</em>, <em>keyfunc=&lt;function RDD.&lt;lambda&gt;&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/rdd.html#RDD.sortByKey"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.RDD.sortByKey" title="Permalink to this definition">¶</a></dt>
 <dd><p>Sorts this RDD, which is assumed to consist of (key, value) pairs.
 # noqa</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">tmp</span> <span class="o">=</span> <span class="p">[(</span><span class="s1">&#39;a&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s1">&#39;b&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="p">(</span><span class="s1">&#39;1&#39;</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span> <span class="p">(</span><span class="s1">&#39;d&#39;</span><span class="p">,</span> <span class="mi">4</span><span class="p">),</span> <span class="p">(</span><span class="s1">&#39;2&#39;</span><span class="p">,</span> <span class="mi">5</span><span class="p">)]</span>
@@ -2646,7 +2646,7 @@ When batching is used, this will be called with an array of objects.</p>
 
 <dl class="method">
 <dt id="pyspark.PickleSerializer.loads">
-<code class="descname">loads</code><span class="sig-paren">(</span><em>obj</em>, <em>encoding=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/serializers.html#PickleSerializer.loads"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.PickleSerializer.loads" title="Permalink to this definition">¶</a></dt>
+<code class="descname">loads</code><span class="sig-paren">(</span><em>obj</em>, <em>encoding='bytes'</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/pyspark/serializers.html#PickleSerializer.loads"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#pyspark.PickleSerializer.loads" title="Permalink to this definition">¶</a></dt>
 <dd><p>Deserialize an object from a byte array.</p>
 </dd></dl>
 

