Posted to commits@iceberg.apache.org by gi...@apache.org on 2022/11/29 07:55:59 UTC

[iceberg-docs] branch asf-site updated: deploy: 591c4b4574eb9a1d688124aac0818c02927aefce

This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/iceberg-docs.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 1f478df8 deploy: 591c4b4574eb9a1d688124aac0818c02927aefce
1f478df8 is described below

commit 1f478df84b811780c8599a6fa0d9a5883d432733
Author: Fokko <Fo...@users.noreply.github.com>
AuthorDate: Tue Nov 29 07:55:54 2022 +0000

    deploy: 591c4b4574eb9a1d688124aac0818c02927aefce
---
 docs/1.1.0/api/index.html                          |  63 ++++
 docs/1.1.0/aws/index.html                          | 292 ++++++++++++++++++
 docs/1.1.0/configuration/index.html                |  27 ++
 docs/1.1.0/custom-catalog/index.html               | 208 +++++++++++++
 docs/1.1.0/dell/index.html                         |  63 ++++
 docs/1.1.0/docssearch.json                         |   2 +-
 docs/1.1.0/evolution/index.html                    |  37 +++
 docs/1.1.0/flink-connector/index.html              |  85 ++++++
 docs/1.1.0/flink/flink-connector/index.html        |   1 +
 docs/1.1.0/flink/index.html                        | 335 +++++++++++++++++++++
 docs/1.1.0/getting-started/index.html              |  53 ++++
 docs/1.1.0/hive/index.html                         | 150 +++++++++
 docs/1.1.0/index.html                              |  14 +-
 docs/1.1.0/index.xml                               |  49 ++-
 docs/1.1.0/java-api-quickstart/index.html          |  88 ++++++
 docs/1.1.0/java/api/index.html                     |   1 +
 docs/1.1.0/java/custom-catalog/index.html          |   1 +
 docs/1.1.0/java/quickstart/index.html              |   1 +
 docs/1.1.0/jdbc/index.html                         |  41 +++
 docs/1.1.0/maintenance/index.html                  |  55 ++++
 docs/1.1.0/nessie/index.html                       |  82 +++++
 docs/1.1.0/partitioning/index.html                 |  28 ++
 docs/1.1.0/performance/index.html                  |  20 ++
 docs/1.1.0/reliability/index.html                  |  20 ++
 docs/1.1.0/schemas/index.html                      |  20 ++
 docs/1.1.0/sitemap.xml                             |   2 +-
 docs/1.1.0/spark-configuration/index.html          |  45 +++
 docs/1.1.0/spark-ddl/index.html                    | 134 +++++++++
 docs/1.1.0/spark-procedures/index.html             |  80 +++++
 docs/1.1.0/spark-queries/index.html                |  95 ++++++
 docs/1.1.0/spark-structured-streaming/index.html   |  53 ++++
 docs/1.1.0/spark-writes/index.html                 | 127 ++++++++
 docs/1.1.0/spark/getting-started/index.html        |   1 +
 docs/1.1.0/spark/spark-configuration/index.html    |   1 +
 docs/1.1.0/spark/spark-ddl/index.html              |   1 +
 docs/1.1.0/spark/spark-procedures/index.html       |   1 +
 docs/1.1.0/spark/spark-queries/index.html          |   1 +
 .../spark/spark-structured-streaming/index.html    |   1 +
 docs/1.1.0/spark/spark-writes/index.html           |   1 +
 docs/1.1.0/tables/configuration/index.html         |   1 +
 docs/1.1.0/tables/evolution/index.html             |   1 +
 docs/1.1.0/tables/maintenance/index.html           |   1 +
 docs/1.1.0/tables/partitioning/index.html          |   1 +
 docs/1.1.0/tables/performance/index.html           |   1 +
 docs/1.1.0/tables/reliability/index.html           |   1 +
 docs/1.1.0/tables/schemas/index.html               |   1 +
 46 files changed, 2281 insertions(+), 5 deletions(-)

diff --git a/docs/1.1.0/api/index.html b/docs/1.1.0/api/index.html
new file mode 100644
index 00000000..8ec476c2
--- /dev/null
+++ b/docs/1.1.0/api/index.html
@@ -0,0 +1,63 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Java API</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css  [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class="collapse in"><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a id=active href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=iceberg-java-api>Iceberg Java API</h1><h [...]
+</span></span></code></pre></div><p>To configure a scan, call <code>filter</code> and <code>select</code> on the <code>TableScan</code> to get a new <code>TableScan</code> with those changes.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>TableScan filteredScan <span style=color:#f92672>=</span> scan<span style=color:#f92672>.</span><span  [...]
+</span></span></code></pre></div><p>Calls to configuration methods create a new <code>TableScan</code> so that each <code>TableScan</code> is immutable and won&rsquo;t change unexpectedly if shared across threads.</p><p>When a scan is configured, <code>planFiles</code>, <code>planTasks</code>, and <code>schema</code> are used to return files, tasks, and the read projection.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4; [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>filter</span><span style=color:#f92672>(</span>Expressions<span style=color:#f92672>.</span><span style=color:#a6e22e>equal</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>5</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>select</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;data&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Schema projection <span style=color:#f92672>=</span> scan<span style=color:#f92672>.</span><span style=color:#a6e22e>schema</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>Iterable<span style=color:#f92672>&lt;</span>CombinedScanTask<span style=color:#f92672>&gt;</span> tasks <span style=color:#f92672>=</span> scan<span style=color:#f92672>.</span><span style=color:#a6e22e>planTasks</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>Use <code>asOfTime</code> or <code>useSnapshot</code> to configure the table snapshot for time travel queries.</p><h4 id=row-level>Row level</h4><p>Iceberg table scans start by creating a <code>ScanBuilder</code> object with <code>IcebergGenerics.read</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><spa [...]
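A minimal time-travel sketch, assuming a loaded `table` as in the scan examples on this page; the snapshot ID and timestamp values below are placeholders, not from this page:

    // sketch: scan the table as of a specific snapshot or point in time
    long snapshotId = 1234567890L;          // placeholder snapshot ID
    long timestampMillis = 1669708554000L;  // placeholder epoch millis
    TableScan snapshotScan = table.newScan().useSnapshot(snapshotId);
    TableScan asOfScan = table.newScan().asOfTime(timestampMillis);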
+</span></span></code></pre></div><p>To configure a scan, call <code>where</code> and <code>select</code> on the <code>ScanBuilder</code> to get a new <code>ScanBuilder</code> with those changes.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>scanBuilder<span style=color:#f92672>.</span><span style=color:#a6e22e>where</span><span style=colo [...]
+</span></span></code></pre></div><p>When a scan is configured, call <code>build</code> to execute the scan. <code>build</code> returns a <code>CloseableIterable&lt;Record></code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>CloseableIterable<span style=color:#f92672>&lt;</span>Record<span style=color:#f92672>&gt;</span> result <span styl [...]
+</span></span><span style=display:flex><span>        <span style=color:#f92672>.</span><span style=color:#a6e22e>where</span><span style=color:#f92672>(</span>Expressions<span style=color:#f92672>.</span><span style=color:#a6e22e>lessThan</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>5</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>        <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>where <code>Record</code> is the Iceberg record class from the iceberg-data module, <code>org.apache.iceberg.data.Record</code>.</p><h3 id=update-operations>Update operations</h3><p><code>Table</code> also exposes operations that update the table. These operations use a builder pattern, <a href=../../../javadoc/1.0.0/index.html?org/apache/iceberg/PendingUpdate.html><code>PendingUpdate</code></a>, that commits when <code>PendingUpdate#commit</code> is called.</p><p>For [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>addColumn</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;count&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>LongType</span><span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>())</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>Available operations to update a table are:</p><ul><li><code>updateSchema</code> &ndash; update the table schema</li><li><code>updateProperties</code> &ndash; update table properties</li><li><code>updateLocation</code> &ndash; update the table&rsquo;s base location</li><li><code>newAppend</code> &ndash; used to append data files</li><li><code>newFastAppend</code> &ndash; used to append data files, will not compact metadata</li><li><code>newOverwrite</c [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// commit operations to the transaction
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>t<span style=color:#f92672>.</span><span style=color:#a6e22e>newDelete</span><span style=color:#f92672>().</span><span style=color:#a6e22e>deleteFromRowFilter</span><span style=color:#f92672>(</span>filter<span style=color:#f92672>).</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>t<span style=color:#f92672>.</span><span style=color:#a6e22e>newAppend</span><span style=color:#f92672>().</span><span style=color:#a6e22e>appendFile</span><span style=color:#f92672>(</span>data<span style=color:#f92672>).</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// commit all the changes to the table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>t<span style=color:#f92672>.</span><span style=color:#a6e22e>commitTransaction</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><h2 id=types>Types</h2><p>Iceberg data types are located in the <a href=../../../javadoc/1.0.0/index.html?org/apache/iceberg/types/package-summary.html><code>org.apache.iceberg.types</code> package</a>.</p><h3 id=primitives>Primitives</h3><p>Primitive type instances are available from static methods in each type class. Types without parameters use <code>get</code>, and types like <code>decimal</code> use factory methods:</p><div class=highlight><pre tabin [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>Types<span style=color:#f92672>.</span><span style=color:#a6e22e>DoubleType</span><span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>     <span style=color:#75715e>// double
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>Types<span style=color:#f92672>.</span><span style=color:#a6e22e>DecimalType</span><span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span><span style=color:#ae81ff>9</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>2</span><span style=color:#f92672>)</span> <span style=color:#75715e>// decimal(9, 2)
+</span></span></span></code></pre></div><h3 id=nested-types>Nested types</h3><p>Structs, maps, and lists are created using factory methods in type classes.</p><p>Like struct fields, map keys or values and list elements are tracked as nested fields. Nested fields track <a href=../evolution#correctness>field IDs</a> and nullability.</p><p>Struct fields are created using <code>NestedField.optional</code> or <code>NestedField.required</code>. Map value and list element nullability is set in  [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>StructType struct <span style=color:#f92672>=</span> StructType<span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>    Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>required</span><span style=color:#f92672>(</span><span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>IntegerType</span><span style=color:# [...]
+</span></span><span style=display:flex><span>    Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>optional</span><span style=color:#f92672>(</span><span style=color:#ae81ff>2</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;data&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>StringType</span><span style=color: [...]
+</span></span><span style=display:flex><span>  <span style=color:#f92672>)</span>
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span><span style=color:#75715e>// map&lt;1 key: int, 2 value: optional string&gt;
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>MapType map <span style=color:#f92672>=</span> MapType<span style=color:#f92672>.</span><span style=color:#a6e22e>ofOptional</span><span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>    <span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>2</span><span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>    Types<span style=color:#f92672>.</span><span style=color:#a6e22e>IntegerType</span><span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>(),</span>
+</span></span><span style=display:flex><span>    Types<span style=color:#f92672>.</span><span style=color:#a6e22e>StringType</span><span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>)</span>
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span><span style=color:#75715e>// array&lt;1 element: int&gt;
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>ListType list <span style=color:#f92672>=</span> ListType<span style=color:#f92672>.</span><span style=color:#a6e22e>ofRequired</span><span style=color:#f92672>(</span><span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> IntegerType<span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>());</span>
+</span></span></code></pre></div><h2 id=expressions>Expressions</h2><p>Iceberg&rsquo;s expressions are used to configure table scans. To create expressions, use the factory methods in <a href=../../../javadoc/1.0.0/index.html?org/apache/iceberg/expressions/Expressions.html><code>Expressions</code></a>.</p><p>Supported predicate expressions are:</p><ul><li><code>isNull</code></li><li><code>notNull</code></li><li><code>equal</code></li><li><code>notEqual</code></li><li><code>lessThan</code [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>filter</span><span style=color:#f92672>(</span>Expressions<span style=color:#f92672>.</span><span style=color:#a6e22e>greaterThanOrEqual</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;x&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>5</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>filter</span><span style=color:#f92672>(</span>Expressions<span style=color:#f92672>.</span><span style=color:#a6e22e>lessThan</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;x&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>10</span><span style=color:#f92672>))</span>
+</span></span></code></pre></div><h2 id=modules>Modules</h2><p>Iceberg table support is organized in library modules:</p><ul><li><code>iceberg-common</code> contains utility classes used in other modules</li><li><code>iceberg-api</code> contains the public Iceberg API, including expressions, types, tables, and operations</li><li><code>iceberg-arrow</code> is an implementation of the Iceberg type system for reading and writing data stored in Iceberg tables using Apache Arrow as the in-mem [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/aws/index.html b/docs/1.1.0/aws/index.html
new file mode 100644
index 00000000..a02c042a
--- /dev/null
+++ b/docs/1.1.0/aws/index.html
@@ -0,0 +1,292 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>AWS</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css rel=s [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class="collapse in"><ul class=sub-menu><li><a id=active href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=iceberg-aws-integrations>Iceberg AWS Integrations</h1>< [...]
+This section describes how to use Iceberg with AWS.</p><h2 id=enabling-aws-integration>Enabling AWS Integration</h2><p>The <code>iceberg-aws</code> module is bundled with Spark and Flink engine runtimes for all versions from <code>0.11.0</code> onwards.
+However, the AWS clients are not bundled so that you can use the same client version as your application.
+You will need to provide the AWS v2 SDK because that is what Iceberg depends on.
+You can choose to use the <a href=https://mvnrepository.com/artifact/software.amazon.awssdk/bundle>AWS SDK bundle</a>,
+or individual AWS client packages (Glue, S3, DynamoDB, KMS, STS) if you would like to have a minimal dependency footprint.</p><p>All the default AWS clients use the <a href=https://mvnrepository.com/artifact/software.amazon.awssdk/url-connection-client>URL Connection HTTP Client</a>
+for HTTP connection management.
+This dependency is not part of the AWS SDK bundle and needs to be added separately.
+To choose a different HTTP client library such as <a href=https://mvnrepository.com/artifact/software.amazon.awssdk/apache-client>Apache HTTP Client</a>,
+see the section <a href=#aws-client-customization>client customization</a> for more details.</p><p>All the AWS module features can be loaded through custom catalog properties;
+see the documentation of each engine for how to load a custom catalog.
+Here are some examples.</p><h3 id=spark>Spark</h3><p>For example, to use AWS features with Spark 3.3 (with Scala 2.12) and AWS clients version 2.17.257, you can start the Spark SQL shell with:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sh data-lang=sh><span style=display:flex><span><span style=color:#75715e># add Iceberg dependency</span>
+</span></span><span style=display:flex><span>ICEBERG_VERSION<span style=color:#f92672>=</span>1.0.0
+</span></span><span style=display:flex><span>DEPENDENCIES<span style=color:#f92672>=</span><span style=color:#e6db74>&#34;org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:</span>$ICEBERG_VERSION<span style=color:#e6db74>&#34;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># add AWS dependency</span>
+</span></span><span style=display:flex><span>AWS_SDK_VERSION<span style=color:#f92672>=</span>2.17.257
+</span></span><span style=display:flex><span>AWS_MAVEN_GROUP<span style=color:#f92672>=</span>software.amazon.awssdk
+</span></span><span style=display:flex><span>AWS_PACKAGES<span style=color:#f92672>=(</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;bundle&#34;</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;url-connection-client&#34;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>for</span> pkg in <span style=color:#e6db74>&#34;</span><span style=color:#e6db74>${</span>AWS_PACKAGES[@]<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>; <span style=color:#66d9ef>do</span>
+</span></span><span style=display:flex><span>    DEPENDENCIES<span style=color:#f92672>+=</span><span style=color:#e6db74>&#34;,</span>$AWS_MAVEN_GROUP<span style=color:#e6db74>:</span>$pkg<span style=color:#e6db74>:</span>$AWS_SDK_VERSION<span style=color:#e6db74>&#34;</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>done</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># start Spark SQL client shell</span>
+</span></span><span style=display:flex><span>spark-sql --packages $DEPENDENCIES <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.warehouse<span style=color:#f92672>=</span>s3://my-bucket/my/key/prefix <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.catalog-impl<span style=color:#f92672>=</span>org.apache.iceberg.aws.glue.GlueCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.io-impl<span style=color:#f92672>=</span>org.apache.iceberg.aws.s3.S3FileIO
+</span></span></code></pre></div><p>In the shell command, we use <code>--packages</code> to specify the additional AWS bundle and HTTP client dependencies, pinned to version <code>2.17.257</code>.</p><h3 id=flink>Flink</h3><p>To use the AWS module with Flink, you can download the necessary dependencies and specify them when starting the Flink SQL client:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-s [...]
+</span></span><span style=display:flex><span>ICEBERG_VERSION<span style=color:#f92672>=</span>1.0.0
+</span></span><span style=display:flex><span>MAVEN_URL<span style=color:#f92672>=</span>https://repo1.maven.org/maven2
+</span></span><span style=display:flex><span>ICEBERG_MAVEN_URL<span style=color:#f92672>=</span>$MAVEN_URL/org/apache/iceberg
+</span></span><span style=display:flex><span>wget $ICEBERG_MAVEN_URL/iceberg-flink-runtime/$ICEBERG_VERSION/iceberg-flink-runtime-$ICEBERG_VERSION.jar
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># download AWS dependency</span>
+</span></span><span style=display:flex><span>AWS_SDK_VERSION<span style=color:#f92672>=</span>2.17.257
+</span></span><span style=display:flex><span>AWS_MAVEN_URL<span style=color:#f92672>=</span>$MAVEN_URL/software/amazon/awssdk
+</span></span><span style=display:flex><span>AWS_PACKAGES<span style=color:#f92672>=(</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;bundle&#34;</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;url-connection-client&#34;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>for</span> pkg in <span style=color:#e6db74>&#34;</span><span style=color:#e6db74>${</span>AWS_PACKAGES[@]<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>; <span style=color:#66d9ef>do</span>
+</span></span><span style=display:flex><span>    wget $AWS_MAVEN_URL/$pkg/$AWS_SDK_VERSION/$pkg-$AWS_SDK_VERSION.jar
+</span></span><span style=display:flex><span><span style=color:#66d9ef>done</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># start Flink SQL client shell</span>
+</span></span><span style=display:flex><span>/path/to/bin/sql-client.sh embedded <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j iceberg-flink-runtime-$ICEBERG_VERSION.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j bundle-$AWS_SDK_VERSION.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j url-connection-client-$AWS_SDK_VERSION.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    shell
+</span></span></code></pre></div><p>With those dependencies, you can create a Flink catalog like the following:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>CATALOG</span> my_catalog <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;s3://my-bucket/my/key/prefix&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;catalog-impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;org.apache.iceberg.aws.glue.GlueCatalog&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;io-impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;org.apache.iceberg.aws.s3.S3FileIO&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>You can also specify the catalog configurations in <code>sql-client-defaults.yaml</code> to preload it:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-yaml data-lang=yaml><span style=display:flex><span><span style=color:#f92672>catalogs</span>: 
+</span></span><span style=display:flex><span>  - <span style=color:#f92672>name</span>: <span style=color:#ae81ff>my_catalog</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>type</span>: <span style=color:#ae81ff>iceberg</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>warehouse</span>: <span style=color:#ae81ff>s3://my-bucket/my/key/prefix</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>catalog-impl</span>: <span style=color:#ae81ff>org.apache.iceberg.aws.glue.GlueCatalog</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>io-impl</span>: <span style=color:#ae81ff>org.apache.iceberg.aws.s3.S3FileIO</span>
+</span></span></code></pre></div><h3 id=hive>Hive</h3><p>To use the AWS module with Hive, you can download the necessary dependencies as in the Flink example,
+and then add them to the Hive classpath or add the jars at runtime in the CLI:</p><pre tabindex=0><code>add jar /my/path/to/iceberg-hive-runtime.jar;
+add jar /my/path/to/aws/bundle.jar;
+add jar /my/path/to/aws/url-connection-client.jar;
+</code></pre><p>With those dependencies, you can register a Glue catalog and create external tables in Hive at runtime in CLI by:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SET</span> iceberg.engine.hive.enabled<span style=color:#f92672>=</span><span style=color:#66d9ef>true</span>;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> hive.vectorized.execution.enabled<span style=color:#f92672>=</span><span style=color:#66d9ef>false</span>;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> iceberg.<span style=color:#66d9ef>catalog</span>.glue.<span style=color:#66d9ef>catalog</span><span style=color:#f92672>-</span>impl<span style=color:#f92672>=</span>org.apache.iceberg.aws.glue.GlueCatalog;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> iceberg.<span style=color:#66d9ef>catalog</span>.glue.warehouse<span style=color:#f92672>=</span>s3:<span style=color:#f92672>//</span>my<span style=color:#f92672>-</span>bucket<span style=color:#f92672>/</span>my<span style=color:#f92672>/</span><span style=color:#66d9ef>key</span><span style=color:#f92672>/</span><span style=color:#66d9ef>prefix</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- suppose you have an Iceberg table database_a.table_a created by GlueCatalog
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>EXTERNAL</span> <span style=color:#66d9ef>TABLE</span> database_a.table_a
+</span></span><span style=display:flex><span>STORED <span style=color:#66d9ef>BY</span> <span style=color:#e6db74>&#39;org.apache.iceberg.mr.hive.HiveIcebergStorageHandler&#39;</span>
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;iceberg.catalog&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;glue&#39;</span>);
+</span></span></code></pre></div><p>You can also preload the catalog by setting the configurations above in <code>hive-site.xml</code>.</p><h2 id=catalogs>Catalogs</h2><p>There are several options for building an Iceberg catalog with AWS.</p><h3 id=glue-catalog>Glue Catalog</h3><p>Iceberg enables the use of <a href=https://aws.amazon.com/glue>AWS Glue</a> as the <code>Catalog</code> implementation.
+When used, an Iceberg namespace is stored as a <a href=https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-databases.html>Glue Database</a>,
+an Iceberg table is stored as a <a href=https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html>Glue Table</a>,
+and every Iceberg table version is stored as a <a href=https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html#aws-glue-api-catalog-tables-TableVersion>Glue TableVersion</a>.
+You can start using the Glue catalog by setting <code>catalog-impl</code> to <code>org.apache.iceberg.aws.glue.GlueCatalog</code>,
+as shown in the <a href=#enabling-aws-integration>enabling AWS integration</a> section above.
+More details about loading the catalog can be found in individual engine pages, such as <a href=../spark-configuration/#loading-a-custom-catalog>Spark</a> and <a href=../flink/#creating-catalogs-and-using-catalogs>Flink</a>.</p><h4 id=glue-catalog-id>Glue Catalog ID</h4><p>There is a unique Glue metastore in each AWS account and each AWS region.
+By default, <code>GlueCatalog</code> chooses the Glue metastore to use based on the user&rsquo;s default AWS client credential and region setup.
+You can specify the Glue catalog ID through the <code>glue.id</code> catalog property to point to a Glue catalog in a different AWS account.
+The Glue catalog ID is your numeric AWS account ID.
+If the Glue catalog is in a different region, you should configure your AWS client to point to the correct region;
+see more details in <a href=#aws-client-customization>AWS client customization</a>.</p><h4 id=skip-archive>Skip Archive</h4><p>By default, Glue stores every table version created, and users can roll back a table to any historical version if needed.
+However, if you are streaming data to Iceberg, this will easily create a lot of Glue table versions.
+Therefore, it is recommended to turn off the archive feature in Glue by setting <code>glue.skip-archive</code> to <code>true</code>.
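As a sketch only, these Glue options can also be passed as catalog properties when initializing the catalog programmatically; the property names are the ones documented on this page, while the account ID, bucket, and catalog name are placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.iceberg.aws.glue.GlueCatalog;

    Map<String, String> properties = new HashMap<>();
    properties.put("warehouse", "s3://my-bucket/my/key/prefix");
    properties.put("glue.id", "123456789012");   // placeholder numeric AWS account ID
    properties.put("glue.skip-archive", "true"); // turn off Glue table version archiving

    GlueCatalog catalog = new GlueCatalog();
    catalog.initialize("my_catalog", properties);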
+For more details, please read <a href=https://docs.aws.amazon.com/general/latest/gr/glue.html>Glue Quotas</a> and the <a href=https://docs.aws.amazon.com/glue/latest/webapi/API_UpdateTable.html>UpdateTable API</a>.</p><h4 id=skip-name-validation>Skip Name Validation</h4><p>This option allows users to skip name validation for table names and namespaces.
+It is recommended to stick to the Glue best practices in
+<a href=https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html>https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html</a> to make sure operations are Hive compatible.
+This option exists only for users with existing conventions that use non-standard characters. When database name
+and table name validation are skipped, there is no guarantee that downstream systems will all support the names.</p><h4 id=optimistic-locking>Optimistic Locking</h4><p>By default, Iceberg uses Glue&rsquo;s optimistic locking for concurrent updates to a table.
+With optimistic locking, each table has a version id.
+When a user retrieves the table metadata, Iceberg records the version id of that table.
+Users can update the table, but only if the version id on the server side has not changed.
+If there is a version mismatch, it means that someone else has modified the table before you did.
+The update attempt fails because you have a stale version of the table.
+If this happens, Iceberg refreshes the metadata and checks whether there is a potential conflict.
+If there is no commit conflict, the operation will be retried.
+Optimistic locking guarantees atomic transactions on Iceberg tables in Glue.
+It also prevents others from accidentally overwriting your changes.</p><div class=info>Please use AWS SDK version >= 2.17.131 to leverage Glue&rsquo;s Optimistic Locking.
+If the AWS SDK version is below 2.17.131, only an in-memory lock is used. To ensure atomic transactions, you need to set up a <a href=#dynamodb-lock-manager>DynamoDb Lock Manager</a>.</div><h4 id=warehouse-location>Warehouse Location</h4><p>Similar to all other catalog implementations, <code>warehouse</code> is a required catalog property to determine the root path of the data warehouse in storage.
+By default, Glue only allows a warehouse location in S3 because of the use of <code>S3FileIO</code>.
+To store data in a different local or cloud store, the Glue catalog can be switched to <code>HadoopFileIO</code> or any custom FileIO by setting the <code>io-impl</code> catalog property.
+Details about this feature can be found in the <a href=../custom-catalog/#custom-file-io-implementation>custom FileIO</a> section.</p><h4 id=table-location>Table Location</h4><p>By default, the root location for a table <code>my_table</code> of namespace <code>my_ns</code> is at <code>my-warehouse-location/my-ns.db/my-table</code>.
+This default root location can be changed at both the namespace and table level.</p><p>To use a different path prefix for all tables under a namespace, use the AWS console or any AWS Glue client SDK you like to update the <code>locationUri</code> attribute of the corresponding Glue database.
+For example, you can update the <code>locationUri</code> of <code>my_ns</code> to <code>s3://my-ns-bucket</code>,
+then any newly created table will have a default root location under the new prefix.
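A sketch of that <code>locationUri</code> update using the AWS SDK v2 Glue client; the namespace and bucket are the example values from this section, and error handling is omitted:

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.DatabaseInput;

    try (GlueClient glue = GlueClient.create()) {
        // point the my_ns database at the new default location prefix
        glue.updateDatabase(req -> req
            .name("my_ns")
            .databaseInput(DatabaseInput.builder()
                .name("my_ns")
                .locationUri("s3://my-ns-bucket")
                .build()));
    }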
+For instance, a new table <code>my_table_2</code> will have its root location at <code>s3://my-ns-bucket/my_table_2</code>.</p><p>To use a completely different root path for a specific table, set the <code>location</code> table property to the desired root path.
+For example, in Spark SQL you can do:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> my_catalog.my_ns.my_table (
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>OPTIONS</span> (<span style=color:#e6db74>&#39;location&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;s3://my-special-table-bucket&#39;</span>)
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (category);
+</span></span></code></pre></div><p>For engines like Spark that support the <code>LOCATION</code> keyword, the above SQL statement is equivalent to:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> my_catalog.my_ns.my_table (
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>LOCATION</span> <span style=color:#e6db74>&#39;s3://my-special-table-bucket&#39;</span>
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (category);
+</span></span></code></pre></div><h3 id=dynamodb-catalog>DynamoDB Catalog</h3><p>Iceberg supports using a <a href=https://aws.amazon.com/dynamodb>DynamoDB</a> table to record and manage database and table information.</p><h4 id=configurations>Configurations</h4><p>The DynamoDB catalog supports the following configurations:</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>dynamodb.table-name</td><td>iceberg</td><td>name of the DynamoDB  [...]
+You can configure the JDBC catalog to use relational database services like <a href=https://aws.amazon.com/rds>AWS RDS</a>.
+Read <a href=../jdbc/#jdbc-catalog>the JDBC integration page</a> for guides and examples about using the JDBC catalog.
+Read <a href=https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.Connecting.Java.html>this AWS documentation</a> for more details about configuring JDBC catalog with IAM authentication.</p><h3 id=which-catalog-to-choose>Which catalog to choose?</h3><p>With all the available options, we offer the following guidance when choosing the right catalog to use for your application:</p><ol><li>if your organization has an existing Glue metastore or plans to use the AWS an [...]
+the catalog first obtains a lock using a helper DynamoDB table and then tries to safely modify the Iceberg table.
+This is necessary for a file-system-based catalog to ensure atomic transactions in stores like S3 that do not provide file-write mutual exclusion.</p><p>This feature requires the following lock-related catalog properties (see the sketch below):</p><ol><li>Set <code>lock-impl</code> as <code>org.apache.iceberg.aws.dynamodb.DynamoDbLockManager</code>.</li><li>Set <code>lock.table</code> as the DynamoDB table name you would like to use. If the lock table with the given name does not exist in DynamoDB, a new tabl [...]
+For more details, please refer to <a href=../configuration/#lock-catalog-properties>Lock catalog properties</a>.</p><h2 id=s3-fileio>S3 FileIO</h2><p>Iceberg allows users to write data to S3 through <code>S3FileIO</code>.
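Tying together the two lock-related properties listed above, a minimal sketch; the DynamoDB table name is a placeholder:

    import java.util.HashMap;
    import java.util.Map;

    // sketch: enabling the DynamoDB lock manager via catalog properties
    Map<String, String> properties = new HashMap<>();
    properties.put("lock-impl", "org.apache.iceberg.aws.dynamodb.DynamoDbLockManager");
    properties.put("lock.table", "my-lock-table"); // placeholder DynamoDB table name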
+<code>GlueCatalog</code> by default uses this <code>FileIO</code>, and other catalogs can load this <code>FileIO</code> using the <code>io-impl</code> catalog property.</p><h3 id=progressive-multipart-upload>Progressive Multipart Upload</h3><p><code>S3FileIO</code> implements a customized progressive multipart upload algorithm to upload data.
+Data files are uploaded by parts in parallel as soon as each part is ready,
+and each file part is deleted as soon as its upload process completes.
+This provides maximized upload speed and minimized local disk usage during uploads.
+Here are the configurations that users can tune related to this feature:</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>s3.multipart.num-threads</td><td>the available number of processors in the system</td><td>number of threads to use for uploading parts to S3 (shared across all output streams)</td></tr><tr><td>s3.multipart.part-size-bytes</td><td>32MB</td><td>the size of a single part for multipart upload requests</td></tr><tr><td>s [...]
+Users can choose the ACL level by setting the <code>s3.acl</code> property.
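For instance, a sketch setting a canned ACL as a catalog property; the value is one of the standard S3 canned ACLs:

    import java.util.HashMap;
    import java.util.Map;

    Map<String, String> properties = new HashMap<>();
    // sketch: grant the bucket owner full control of objects written by S3FileIO
    properties.put("s3.acl", "bucket-owner-full-control");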
+For more details, please read <a href=https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html>S3 ACL Documentation</a>.</p><h3 id=object-store-file-layout>Object Store File Layout</h3><p>S3 and many other cloud storage services <a href=https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>throttle requests based on object prefix</a>.
+Data stored in S3 with a traditional Hive storage layout can face S3 request throttling as objects are stored under the same filepath prefix.</p><p>Iceberg by default uses the Hive storage layout, but can be switched to use the <code>ObjectStoreLocationProvider</code>.
+With <code>ObjectStoreLocationProvider</code>, a deterministic hash is generated for each stored file, with the hash appended
+directly after the <code>write.data.path</code>. This ensures files written to S3 are equally distributed across multiple <a href=https://aws.amazon.com/premiumsupport/knowledge-center/s3-object-key-naming-pattern/>prefixes</a> in the S3 bucket, resulting in minimized throttling and maximized throughput for S3-related IO operations. When using <code>ObjectStoreLocationProvider</code>, having a shared and short <code>write.data.path</code> across your Iceberg tables will improve performanc [...]
+Below is an example Spark SQL command to create a table using the <code>ObjectStoreLocationProvider</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> my_catalog.my_ns.my_table (
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>OPTIONS</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;write.object-storage.enabled&#39;</span><span style=color:#f92672>=</span><span style=color:#66d9ef>true</span>, 
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;write.data.path&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;s3://my-table-data-bucket&#39;</span>)
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (category);
+</span></span></code></pre></div><p>We can then insert a single row into this new table:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-SQL data-lang=SQL><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> my_catalog.my_ns.my_table <span style=color:#66d9ef>VALUES</span> (<span style=color:#ae81ff>1</span>, <span style=color:#e6db74>& [...]
+</span></span></code></pre></div><p>This will write the data to S3 with a hash (<code>2d3905f8</code>) appended directly after the <code>write.data.path</code>, ensuring reads to the table are spread evenly across <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance.html>S3 bucket prefixes</a>, improving performance.</p><pre tabindex=0><code>s3://my-table-data-bucket/2d3905f8/my_ns.db/my_table/category=orders/00000-0-5affc076-96a4-48f2-9cd2- [...]
+</code></pre><p>Note that the path resolution logic for <code>ObjectStoreLocationProvider</code> is <code>write.data.path</code>, then <code>&lt;tableLocation>/data</code>.
+However, for older versions up to 0.12.0, the logic is as follows:</p><ul><li>before 0.12.0, <code>write.object-storage.path</code> must be set.</li><li>at 0.12.0, <code>write.object-storage.path</code> then <code>write.folder-storage.path</code> then <code>&lt;tableLocation>/data</code>.</li></ul><p>For more details, please refer to the <a href=../custom-catalog/#custom-location-provider-implementation>LocationProvider Configuration</a> section.</p><h3 id=s3-strong-consistency>S3 St [...]
+There is no redundant consistency wait and check which might negatively impact performance during IO operations.</p><h3 id=hadoop-s3a-filesystem>Hadoop S3A FileSystem</h3><p>Before <code>S3FileIO</code> was introduced, many Iceberg users chose to use <code>HadoopFileIO</code> to write data to S3 through the <a href=https://github.com/apache/hadoop/blob/trunk/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java>S3A FileSystem</a>.
+As introduced in the previous sections, <code>S3FileIO</code> adopts the latest AWS clients and S3 features for optimized security and performance,
+and is thus recommended for S3 use cases over the S3A FileSystem.</p><p><code>S3FileIO</code> writes data with the <code>s3://</code> URI scheme, but it is also compatible with schemes written by the S3A FileSystem.
+This means for any table manifests containing <code>s3a://</code> or <code>s3n://</code> file paths, <code>S3FileIO</code> is still able to read them.
+This feature allows people to easily switch from S3A to <code>S3FileIO</code>.</p><p>If for any reason you have to use S3A, here are the instructions:</p><ol><li>To store data using S3A, specify the <code>warehouse</code> catalog property to be an S3A path, e.g. <code>s3a://my-bucket/my-warehouse</code></li><li>For <code>HiveCatalog</code>, to also store metadata using S3A, specify the Hadoop config property <code>hive.metastore.warehouse.dir</code> to be an S3A path.</li><li>Add <a href [...]
+This is turned off by default.</p><h3 id=s3-tags>S3 Tags</h3><p>Custom <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html>tags</a> can be added to S3 objects while writing and deleting.
+For example, to write S3 tags with Spark 3.3, you can start the Spark SQL shell with:</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.write.tags.my_key1=my_val1 \
+    --conf spark.sql.catalog.my_catalog.s3.write.tags.my_key2=my_val2
+</code></pre><p>For the above example, the objects in S3 will be saved with tags: <code>my_key1=my_val1</code> and <code>my_key2=my_val2</code>. Note that the specified write tags are applied only during object creation.</p><p>When the catalog property <code>s3.delete-enabled</code> is set to <code>false</code>, the objects are not hard-deleted from S3.
+This is expected to be used in combination with S3 delete tagging, so objects are tagged and removed using <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html>S3 lifecycle policy</a>.
+The property is set to <code>true</code> by default.</p><p>With the <code>s3.delete.tags</code> config, objects are tagged with the configured key-value pairs before deletion.
+Users can configure tag-based object lifecycle policy at bucket level to transition objects to different tiers.
+For example, to add S3 delete tags with Spark 3.3, you can start the Spark SQL shell with:</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://iceberg-warehouse/s3-tagging \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.delete.tags.my_key3=my_val3 \
+    --conf spark.sql.catalog.my_catalog.s3.delete-enabled=false
+</code></pre><p>For the above example, the objects in S3 will be tagged with <code>my_key3=my_val3</code> before deletion.
+Users can also use the catalog property <code>s3.delete.num-threads</code> to specify the number of threads used for adding delete tags to the S3 objects.</p><p>When the catalog properties <code>s3.write.table-tag-enabled</code> and <code>s3.write.namespace-tag-enabled</code> are set to <code>true</code>, the objects in S3 will be saved with the tags <code>iceberg.table=&lt;table-name></code> and <code>iceberg.namespace=&lt;namespace-name></code>.
+Users can define access and data retention policy per namespace or table based on these tags.
+For example, to write the table and namespace names as S3 tags with Spark 3.3, you can start the Spark SQL shell with:</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://iceberg-warehouse/s3-tagging \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.write.table-tag-enabled=true \
+    --conf spark.sql.catalog.my_catalog.s3.write.namespace-tag-enabled=true
+</code></pre><p>For more details on tag restrictions, please refer to <a href=https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html>User-Defined Tag Restrictions</a>.</p><h3 id=s3-access-points>S3 Access Points</h3><p><a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html>Access Points</a> can be used to perform
+S3 operations by specifying a mapping of bucket to access points. This is useful for multi-region access, cross-region access,
+disaster recovery, etc.</p><p>To use cross-region access points, additionally set the <code>use-arn-region-enabled</code> catalog property to
+<code>true</code> to enable <code>S3FileIO</code> to make cross-region calls; this is not required for same-region or multi-region access points.</p><p>For example, to use an S3 access point with Spark 3.3, you can start the Spark SQL shell with:</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.use-arn-region-enabled=false \
+    --conf spark.sql.catalog.my_catalog.s3.access-points.my-bucket1=arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap \
+    --conf spark.sql.catalog.my_catalog.s3.access-points.my-bucket2=arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap
+</code></pre><p>For the above example, the objects in the <code>my-bucket1</code> and <code>my-bucket2</code> buckets will use the <code>arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap</code>
+access point for all S3 operations.</p><p>For more details on using access points, please refer to <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-usage-examples.html>Using access points with compatible Amazon S3 operations</a>.</p><h3 id=s3-acceleration>S3 Acceleration</h3><p><a href=https://aws.amazon.com/s3/transfer-acceleration/>S3 Acceleration</a> can be used to speed up transfers to and from Amazon S3 by as much as 50-500% for long-distance transfer of larg [...]
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.acceleration-enabled=true
+</code></pre><p>For more details on using S3 Acceleration, please refer to <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html>Configuring fast, secure file transfers using Amazon S3 Transfer Acceleration</a>.</p><h3 id=s3-dual-stack>S3 Dual-stack</h3><p><a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html>S3 Dual-stack</a> allows a client to access an S3 bucket through a dual-stack endpoint.
+When clients make a request to a dual-stack endpoint, the bucket URL resolves to an IPv6 address if possible, and otherwise falls back to IPv4.</p><p>To use S3 Dual-stack, set the <code>s3.dualstack-enabled</code> catalog property to <code>true</code> to enable <code>S3FileIO</code> to make dual-stack S3 calls.</p><p>For example, to use S3 Dual-stack with Spark 3.3, you can start the Spark SQL shell with:</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache. [...]
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.s3.dualstack-enabled=true
+</code></pre><p>For more details on using S3 Dual-stack, please refer to <a href=https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html#dual-stack-endpoints-cli>Using dual-stack endpoints from the AWS CLI and the AWS SDKs</a>.</p><h2 id=aws-client-customization>AWS Client Customization</h2><p>Many organizations customize the way they configure AWS clients, with their own credential providers, access proxies, retry strategies, etc.
+Iceberg allows users to plug in their own implementation of <code>org.apache.iceberg.aws.AwsClientFactory</code> by setting the <code>client.factory</code> catalog property.</p><h3 id=cross-account-and-cross-region-access>Cross-Account and Cross-Region Access</h3><p>It is a common use case for organizations to have a centralized AWS account for Glue metastore and S3 buckets, and use different AWS accounts and regions for different teams to access those resources.
+In this case, a <a href=https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html>cross-account IAM role</a> is needed to access those centralized resources.
+Iceberg provides an AWS client factory <code>AssumeRoleAwsClientFactory</code> to support this common use case.
+This also serves as an example for users who would like to implement their own AWS client factory.</p><p>This client factory has the following configurable catalog properties:</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>client.assume-role.arn</td><td>null, requires user input</td><td>ARN of the role to assume, e.g. arn:aws:iam::123456789:role/myRoleToAssume</td></tr><tr><td>client.assume-role.region</td><td>null, requires user inp [...]
+The Glue, S3, and DynamoDB clients are then initialized with the assumed-role credentials and region to access those resources.
+Here is an example of starting the Spark SQL shell with this client factory:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-shell data-lang=shell><span style=display:flex><span>spark-sql --packages org.apache.iceberg:iceberg-spark-runtime:1.0.0,software.amazon.awssdk:bundle:2.17.257 <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.warehouse<span style=color:#f92672>=</span>s3://my-bucket/my/key/prefix <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.catalog-impl<span style=color:#f92672>=</span>org.apache.iceberg.aws.glue.GlueCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.client.factory<span style=color:#f92672>=</span>org.apache.iceberg.aws.AssumeRoleAwsClientFactory <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.client.assume-role.arn<span style=color:#f92672>=</span>arn:aws:iam::123456789:role/myRoleToAssume <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.client.assume-role.region<span style=color:#f92672>=</span>ap-northeast-1
+</span></span></code></pre></div><h3 id=http-client-configurations>HTTP Client Configurations</h3><p>AWS clients support two types of HTTP clients: the <a href=https://mvnrepository.com/artifact/software.amazon.awssdk/url-connection-client>URL Connection HTTP Client</a>
+and the <a href=https://mvnrepository.com/artifact/software.amazon.awssdk/apache-client>Apache HTTP Client</a>.
+By default, AWS clients use the <strong>URL Connection</strong> HTTP Client to communicate with the service.
+This HTTP client optimizes for minimum dependencies and startup latency, but supports less functionality than other implementations.
+In contrast, the Apache HTTP Client supports more functionality and more customized settings, such as the expect-continue handshake and TCP KeepAlive, at the cost of extra dependencies and additional startup latency.</p><p>For more configuration details, see the sections <a href=#url-connection-http-client-configurations>URL Connection HTTP Client Configurations</a> and <a href=#apache-http-client-configurations>Apache HTTP Client Configurations</a>.</p><p>Configure the following property to set the  [...]
+</span></span></code></pre></div><h4 id=apache-http-client-configurations>Apache HTTP Client Configurations</h4><p>Apache HTTP Client has the following configurable properties:</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>http-client.apache.socket-timeout-ms</td><td>null</td><td>An optional <a href=https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.Builder.html#socketTimeout(java.time.Dura [...]
+</span></span></code></pre></div><h2 id=run-iceberg-on-aws>Run Iceberg on AWS</h2><h3 id=amazon-athena>Amazon Athena</h3><p><a href=https://aws.amazon.com/athena/>Amazon Athena</a> provides a serverless query engine that can be used to perform read, write, update, and optimization tasks against Iceberg tables.
+More details can be found <a href=https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg.html>here</a>.</p><h3 id=amazon-emr>Amazon EMR</h3><p><a href=https://aws.amazon.com/emr/>Amazon EMR</a> can provision clusters with <a href=https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark.html>Spark</a> (EMR 6 for Spark 3, EMR 5 for Spark 2),
+<a href=https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-hive.html>Hive</a>, <a href=https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-flink.html>Flink</a>,
+<a href=https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-presto.html>Trino</a> that can run Iceberg.</p><p>Starting with EMR version 6.5.0, EMR clusters can be configured to have the necessary Apache Iceberg dependencies installed without requiring bootstrap actions.
+Please refer to the <a href=https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-iceberg-use-cluster.html>official documentation</a> on how to create a cluster with Iceberg installed.</p><p>For versions before 6.5.0, you can use a <a href=https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-bootstrap.html>bootstrap action</a> similar to the following to pre-install all necessary dependencies:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#2728 [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>
+</span></span><span style=display:flex><span>AWS_SDK_VERSION<span style=color:#f92672>=</span>2.17.257
+</span></span><span style=display:flex><span>ICEBERG_VERSION<span style=color:#f92672>=</span>1.0.0
+</span></span><span style=display:flex><span>MAVEN_URL<span style=color:#f92672>=</span>https://repo1.maven.org/maven2
+</span></span><span style=display:flex><span>ICEBERG_MAVEN_URL<span style=color:#f92672>=</span>$MAVEN_URL/org/apache/iceberg
+</span></span><span style=display:flex><span>AWS_MAVEN_URL<span style=color:#f92672>=</span>$MAVEN_URL/software/amazon/awssdk
+</span></span><span style=display:flex><span><span style=color:#75715e># NOTE: this is just an example shared class path between Spark and Flink,</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>#  please choose a proper class path for production.</span>
+</span></span><span style=display:flex><span>LIB_PATH<span style=color:#f92672>=</span>/usr/share/aws/aws-java-sdk/
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>AWS_PACKAGES<span style=color:#f92672>=(</span>
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#34;bundle&#34;</span>
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#34;url-connection-client&#34;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>ICEBERG_PACKAGES<span style=color:#f92672>=(</span>
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#34;iceberg-spark-runtime-3.3_2.12&#34;</span>
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#34;iceberg-flink-runtime&#34;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>install_dependencies <span style=color:#f92672>()</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>  install_path<span style=color:#f92672>=</span>$1
+</span></span><span style=display:flex><span>  download_url<span style=color:#f92672>=</span>$2
+</span></span><span style=display:flex><span>  version<span style=color:#f92672>=</span>$3
+</span></span><span style=display:flex><span>  shift
+</span></span><span style=display:flex><span>  pkgs<span style=color:#f92672>=(</span><span style=color:#e6db74>&#34;</span>$@<span style=color:#e6db74>&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>for</span> pkg in <span style=color:#e6db74>&#34;</span><span style=color:#e6db74>${</span>pkgs[@]<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>; <span style=color:#66d9ef>do</span>
+</span></span><span style=display:flex><span>    sudo wget -P $install_path $download_url/$pkg/$version/$pkg-$version.jar
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>done</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>install_dependencies $LIB_PATH $ICEBERG_MAVEN_URL $ICEBERG_VERSION <span style=color:#e6db74>&#34;</span><span style=color:#e6db74>${</span>ICEBERG_PACKAGES[@]<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>
+</span></span><span style=display:flex><span>install_dependencies $LIB_PATH $AWS_MAVEN_URL $AWS_SDK_VERSION <span style=color:#e6db74>&#34;</span><span style=color:#e6db74>${</span>AWS_PACKAGES[@]<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>
+</span></span></code></pre></div><h3 id=aws-eks>AWS EKS</h3><p><a href=https://aws.amazon.com/eks/>AWS Elastic Kubernetes Service (EKS)</a> can be used to start Spark, Flink, Hive, Presto, or Trino clusters that work with Iceberg.
+Search the <a href=../../../blogs>Iceberg blogs</a> page for tutorials on running Iceberg with Docker and Kubernetes.</p><h3 id=amazon-kinesis>Amazon Kinesis</h3><p><a href=https://aws.amazon.com/about-aws/whats-new/2019/11/you-can-now-run-fully-managed-apache-flink-applications-with-apache-kafka/>Amazon Kinesis Data Analytics</a> provides a platform
+to run fully managed Apache Flink applications. You can include Iceberg in your application Jar and run it in the platform.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#enabling-aws-integration>Enabling AWS Integration</a><ul><li><a href=#spark>Spark</a></li><li><a href=#flink>Flink</a></li><li><a href=#hive>Hive</a></li></ul></li><li><a href=#catalogs>Catalogs</a><ul><li><a href=#glue-catalog>Glue Catalog</a></li><li><a href=#dynamodb-ca [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/configuration/index.html b/docs/1.1.0/configuration/index.html
new file mode 100644
index 00000000..cb298ec4
--- /dev/null
+++ b/docs/1.1.0/configuration/index.html
@@ -0,0 +1,27 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Configuration</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a id=active href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=configuration>Configuration</h1><h2 id=table-properties [...]
+The values of these properties are not persisted as part of the table metadata.</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>format-version</td><td>1</td><td>Table&rsquo;s format version (can be 1 or 2) as defined in the <a href=../../../spec/#format-versioning>Spec</a>.</td></tr></tbody></table><h3 id=compatibility-flags>Compatibility flags</h3><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><t [...]
+Any other custom catalog can access the properties by implementing <code>Catalog.initialize(catalogName, catalogProperties)</code>.
+The properties can be manually constructed or passed in from a compute engine like Spark or Flink.
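+For example, the properties can be constructed manually and passed to <code>initialize</code> (a minimal sketch; the catalog name and warehouse path are placeholders, and <code>CustomCatalog</code> stands in for a custom implementation such as the one in the <a href=../custom-catalog/>Java Custom Catalog</a> section):</p><pre tabindex=0><code>Map&lt;String, String&gt; properties = new HashMap&lt;&gt;();
+properties.put(&#34;warehouse&#34;, &#34;s3://my-bucket/my/key/prefix&#34;);
+
+Catalog catalog = new CustomCatalog();
+catalog.initialize(&#34;my_catalog&#34;, properties);
+</code></pre><p>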
+Spark uses its session properties as catalog properties; see more details in the <a href=../spark-configuration#catalog-configuration>Spark configuration</a> section.
+Flink passes in catalog properties through the <code>CREATE CATALOG</code> statement; see more details in the <a href=../flink/#creating-catalogs-and-using-catalogs>Flink</a> section.</p><h3 id=lock-catalog-properties>Lock catalog properties</h3><p>Here are the catalog properties related to locking. They are used by some catalog implementations to control the locking behavior during commits.</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td [...]
+of the Hive Metastore (<code>hive.txn.timeout</code> or <code>metastore.txn.timeout</code> in the newer versions). Otherwise, the heartbeats on the lock (which happen during the lock checks) would end up expiring in the
+Hive Metastore before the lock is retried from Iceberg.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#table-properties>Table properties</a><ul><li><a href=#read-properties>Read properties</a></li><li><a href=#write-properties>Write properties</a></li><li><a href=#table-behavior-properties>Table behavior properties</a></li><li><a href=#reserved-table-properties>Reserved table properties</a></li><li><a href=#compatibility-flags>Compatibility [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/custom-catalog/index.html b/docs/1.1.0/custom-catalog/index.html
new file mode 100644
index 00000000..364e0d88
--- /dev/null
+++ b/docs/1.1.0/custom-catalog/index.html
@@ -0,0 +1,208 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Java Custom Catalog</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-aweso [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class="collapse in"><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a id=active href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=custom-catalog>Custom Catalog</h1><p>It& [...]
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> String dbName<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> String tableName<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> Configuration conf<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> FileIO fileIO<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>protected</span> <span style=color:#a6e22e>CustomTableOperations</span><span style=color:#f92672>(</span>Configuration conf<span style=color:#f92672>,</span> String dbName<span style=color:#f92672>,</span> String tableName<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>this</span><span style=color:#f92672>.</span><span style=color:#a6e22e>conf</span> <span style=color:#f92672>=</span> conf<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>this</span><span style=color:#f92672>.</span><span style=color:#a6e22e>dbName</span> <span style=color:#f92672>=</span> dbName<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>this</span><span style=color:#f92672>.</span><span style=color:#a6e22e>tableName</span> <span style=color:#f92672>=</span> tableName<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// The doRefresh method should implement how to get the metadata location
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>doRefresh</span><span style=color:#f92672>()</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Example custom service which returns the metadata location given a dbName and tableName
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    String metadataLocation <span style=color:#f92672>=</span> CustomService<span style=color:#f92672>.</span><span style=color:#a6e22e>getMetadataForTable</span><span style=color:#f92672>(</span>conf<span style=color:#f92672>,</span> dbName<span style=color:#f92672>,</span> tableName<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// When updating from a metadata file location, call the helper method
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    refreshFromMetadataLocation<span style=color:#f92672>(</span>metadataLocation<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// The doCommit method should implement how to update the metadata location atomically
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>doCommit</span><span style=color:#f92672>(</span>TableMetadata base<span style=color:#f92672>,</span> TableMetadata metadata<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    String oldMetadataLocation <span style=color:#f92672>=</span> base<span style=color:#f92672>.</span><span style=color:#a6e22e>location</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Write new metadata using helper method
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    String newMetadataLocation <span style=color:#f92672>=</span> writeNewMetadata<span style=color:#f92672>(</span>metadata<span style=color:#f92672>,</span> currentVersion<span style=color:#f92672>()</span> <span style=color:#f92672>+</span> <span style=color:#ae81ff>1</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Example custom service which updates the metadata location for the given db and table atomically
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    CustomService<span style=color:#f92672>.</span><span style=color:#a6e22e>updateMetadataLocation</span><span style=color:#f92672>(</span>dbName<span style=color:#f92672>,</span> tableName<span style=color:#f92672>,</span> oldMetadataLocation<span style=color:#f92672>,</span> newMetadataLocation<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// The io method provides a FileIO which is used to read and write the table metadata files
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> FileIO <span style=color:#a6e22e>io</span><span style=color:#f92672>()</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>if</span> <span style=color:#f92672>(</span>fileIO <span style=color:#f92672>==</span> <span style=color:#66d9ef>null</span><span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>      fileIO <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HadoopFileIO<span style=color:#f92672>(</span>conf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>return</span> fileIO<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span></code></pre></div><p>A <code>TableOperations</code> instance is usually obtained by calling <code>Catalog.newTableOps(TableIdentifier)</code>.
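+For example, from within a catalog implementation (a minimal sketch; the database and table names are placeholders):</p><pre tabindex=0><code>TableOperations ops = newTableOps(TableIdentifier.of(&#34;my_db&#34;, &#34;my_table&#34;));
+
+// read the current table metadata and get the FileIO used for metadata files
+TableMetadata current = ops.current();
+FileIO io = ops.io();
+</code></pre><p>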
+See the next section about implementing and loading a custom catalog.</p><h3 id=custom-catalog-implementation>Custom catalog implementation</h3><p>Extend <code>BaseMetastoreCatalog</code> to provide default warehouse locations and instantiate <code>CustomTableOperations</code></p><p>Example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>< [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> Configuration configuration<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// must have a no-arg constructor to be dynamically loaded
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#75715e>// initialize(String name, Map&lt;String, String&gt; properties) will be called to complete initialization
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#66d9ef>public</span> <span style=color:#a6e22e>CustomCatalog</span><span style=color:#f92672>()</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#a6e22e>CustomCatalog</span><span style=color:#f92672>(</span>Configuration configuration<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>this</span><span style=color:#f92672>.</span><span style=color:#a6e22e>configuration</span> <span style=color:#f92672>=</span> configuration<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>protected</span> TableOperations <span style=color:#a6e22e>newTableOps</span><span style=color:#f92672>(</span>TableIdentifier tableIdentifier<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    String dbName <span style=color:#f92672>=</span> tableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>namespace</span><span style=color:#f92672>().</span><span style=color:#a6e22e>level</span><span style=color:#f92672>(</span><span style=color:#ae81ff>0</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    String tableName <span style=color:#f92672>=</span> tableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>name</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// instantiate the CustomTableOperations
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>return</span> <span style=color:#66d9ef>new</span> CustomTableOperations<span style=color:#f92672>(</span>configuration<span style=color:#f92672>,</span> dbName<span style=color:#f92672>,</span> tableName<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>protected</span> String <span style=color:#a6e22e>defaultWarehouseLocation</span><span style=color:#f92672>(</span>TableIdentifier tableIdentifier<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Can choose to use any other configuration name
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    String tableLocation <span style=color:#f92672>=</span> configuration<span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;custom.iceberg.warehouse.location&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Can be an s3 or hdfs path
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>if</span> <span style=color:#f92672>(</span>tableLocation <span style=color:#f92672>==</span> <span style=color:#66d9ef>null</span><span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>      <span style=color:#66d9ef>throw</span> <span style=color:#66d9ef>new</span> RuntimeException<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;custom.iceberg.warehouse.location configuration not set!&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>return</span> String<span style=color:#f92672>.</span><span style=color:#a6e22e>format</span><span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>            <span style=color:#e6db74>&#34;%s/%s.db/%s&#34;</span><span style=color:#f92672>,</span> tableLocation<span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>            tableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>namespace</span><span style=color:#f92672>().</span><span style=color:#a6e22e>levels</span><span style=color:#f92672>()[</span><span style=color:#ae81ff>0</span><span style=color:#f92672>],</span>
+</span></span><span style=display:flex><span>            tableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>name</span><span style=color:#f92672>());</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>boolean</span> <span style=color:#a6e22e>dropTable</span><span style=color:#f92672>(</span>TableIdentifier identifier<span style=color:#f92672>,</span> <span style=color:#66d9ef>boolean</span> purge<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Example service to delete table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    CustomService<span style=color:#f92672>.</span><span style=color:#a6e22e>deleteTable</span><span style=color:#f92672>(</span>identifier<span style=color:#f92672>.</span><span style=color:#a6e22e>namespace</span><span style=color:#f92672>().</span><span style=color:#a6e22e>level</span><span style=color:#f92672>(</span><span style=color:#ae81ff>0</span><span style=color:#f92672>),</span> identifier<spa [...]
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>renameTable</span><span style=color:#f92672>(</span>TableIdentifier from<span style=color:#f92672>,</span> TableIdentifier to<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    Preconditions<span style=color:#f92672>.</span><span style=color:#a6e22e>checkArgument</span><span style=color:#f92672>(</span>from<span style=color:#f92672>.</span><span style=color:#a6e22e>namespace</span><span style=color:#f92672>().</span><span style=color:#a6e22e>level</span><span style=color:#f92672>(</span><span style=color:#ae81ff>0</span><span style=color:#f92672>).</span><span style=color:#a6e22e>equals</span><span style=color:#f [...]
+</span></span><span style=display:flex><span>            <span style=color:#e6db74>&#34;Cannot move table between databases&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Example service to rename table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    CustomService<span style=color:#f92672>.</span><span style=color:#a6e22e>renameTable</span><span style=color:#f92672>(</span>from<span style=color:#f92672>.</span><span style=color:#a6e22e>namespace</span><span style=color:#f92672>().</span><span style=color:#a6e22e>level</span><span style=color:#f92672>(</span><span style=color:#ae81ff>0</span><span style=color:#f92672>),</span> from<span style=colo [...]
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// implement this method to read catalog name and properties during initialization
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>initialize</span><span style=color:#f92672>(</span>String name<span style=color:#f92672>,</span> Map<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> properties<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span></code></pre></div><p>Catalog implementations can be dynamically loaded in most compute engines.
+For Spark and Flink, you can specify the <code>catalog-impl</code> catalog property to load it.
+Read the <a href=../configuration/#catalog-properties>Configuration</a> section for more details.
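+For example, a custom catalog implementation like the <code>CustomCatalog</code> above could be loaded in Spark with (a minimal sketch; the catalog name and the <code>com.my.CustomCatalog</code> class are placeholders):</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=com.my.CustomCatalog
+</code></pre><p>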
+For MapReduce, implement <code>org.apache.iceberg.mr.CatalogLoader</code> and set Hadoop property <code>iceberg.mr.catalog.loader.class</code> to load it.
+If your catalog must read Hadoop configuration to access certain environment properties, make your catalog implement <code>org.apache.hadoop.conf.Configurable</code>.</p><h3 id=custom-file-io-implementation>Custom file IO implementation</h3><p>Extend <code>FileIO</code> and provide an implementation to read and write data files</p><p>Example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-jav [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// must have a no-arg constructor to be dynamically loaded
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#75715e>// initialize(Map&lt;String, String&gt; properties) will be called to complete initialization
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#66d9ef>public</span> <span style=color:#a6e22e>CustomFileIO</span><span style=color:#f92672>()</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> InputFile <span style=color:#a6e22e>newInputFile</span><span style=color:#f92672>(</span>String s<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// you also need to implement the InputFile interface for a custom input file
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>return</span> <span style=color:#66d9ef>new</span> CustomInputFile<span style=color:#f92672>(</span>s<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> OutputFile <span style=color:#a6e22e>newOutputFile</span><span style=color:#f92672>(</span>String s<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// you also need to implement the OutputFile interface for a custom output file
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>return</span> <span style=color:#66d9ef>new</span> CustomOutputFile<span style=color:#f92672>(</span>s<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>deleteFile</span><span style=color:#f92672>(</span>String path<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    Path toDelete <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Path<span style=color:#f92672>(</span>path<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    FileSystem fs <span style=color:#f92672>=</span> Util<span style=color:#f92672>.</span><span style=color:#a6e22e>getFs</span><span style=color:#f92672>(</span>toDelete<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>try</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>        fs<span style=color:#f92672>.</span><span style=color:#a6e22e>delete</span><span style=color:#f92672>(</span>toDelete<span style=color:#f92672>,</span> <span style=color:#66d9ef>false</span> <span style=color:#75715e>/* not recursive */</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>}</span> <span style=color:#66d9ef>catch</span> <span style=color:#f92672>(</span>IOException e<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>        <span style=color:#66d9ef>throw</span> <span style=color:#66d9ef>new</span> RuntimeIOException<span style=color:#f92672>(</span>e<span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;Failed to delete file: %s&#34;</span><span style=color:#f92672>,</span> path<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// implement this method to read catalog properties during initialization
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#66d9ef>public</span> <span style=color:#66d9ef>void</span> <span style=color:#a6e22e>initialize</span><span style=color:#f92672>(</span>Map<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> properties<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span></code></pre></div><p>If you are already implementing your own catalog, you can implement <code>TableOperations.io()</code> to use your custom <code>FileIO</code>.
+In addition, custom <code>FileIO</code> implementations can also be dynamically loaded in <code>HadoopCatalog</code> and <code>HiveCatalog</code> by specifying the <code>io-impl</code> catalog property.
+Read the <a href=../configuration/#catalog-properties>Configuration</a> section for more details.
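+For example, the <code>CustomFileIO</code> above could be loaded in Spark with (a minimal sketch; the catalog name and the <code>com.my.CustomFileIO</code> class are placeholders):</p><pre tabindex=0><code>spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=com.my.CustomFileIO
+</code></pre><p>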
+If your <code>FileIO</code> must read Hadoop configuration to access certain environment properties, make your <code>FileIO</code> implement <code>org.apache.hadoop.conf.Configurable</code>.</p><h3 id=custom-location-provider-implementation>Custom location provider implementation</h3><p>Extend <code>LocationProvider</code> and provide an implementation to determine the file path for writing data</p><p>Example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822 [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>private</span> String tableLocation<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#75715e>// must have a 2-arg constructor like this, or a no-arg constructor
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>  <span style=color:#66d9ef>public</span> <span style=color:#a6e22e>CustomLocationProvider</span><span style=color:#f92672>(</span>String tableLocation<span style=color:#f92672>,</span> Map<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> properties<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>this</span><span style=color:#f92672>.</span><span style=color:#a6e22e>tableLocation</span> <span style=color:#f92672>=</span> tableLocation<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> String <span style=color:#a6e22e>newDataLocation</span><span style=color:#f92672>(</span>String filename<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// can use any custom method to generate a file path given a file name
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>return</span> String<span style=color:#f92672>.</span><span style=color:#a6e22e>format</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;%s/%s/%s&#34;</span><span style=color:#f92672>,</span> tableLocation<span style=color:#f92672>,</span> UUID<span style=color:#f92672>.</span><span style=color:#a6e22e>randomUUID</span><span style=color:#f92672>().</span [...]
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>public</span> String <span style=color:#a6e22e>newDataLocation</span><span style=color:#f92672>(</span>PartitionSpec spec<span style=color:#f92672>,</span> StructLike partitionData<span style=color:#f92672>,</span> String filename<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// can use any custom method to generate a file path given a partition info and file name
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#66d9ef>return</span> newDataLocation<span style=color:#f92672>(</span>filename<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span></code></pre></div><p>If you are already implementing your own catalog, you can override <code>TableOperations.locationProvider()</code> to use your custom default <code>LocationProvider</code>.
+To use a different custom location provider for a specific table, specify the implementation when creating the table using the table property <code>write.location-provider.impl</code>.</p><p>Example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> hive.<span style=color [...]
+</span></span><span style=display:flex><span>  id bigint,
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>  category string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>OPTIONS</span> (
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;write.location-provider.impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;com.my.CustomLocationProvider&#39;</span>
+</span></span><span style=display:flex><span>)
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (category);
+</span></span></code></pre></div><h3 id=custom-icebergsource>Custom IcebergSource</h3><p>Extend <code>IcebergSource</code> and provide an implementation to read from <code>CustomCatalog</code>.</p><p>Example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span><span style=color:#66d9ef>public</span> <span style=color:#66d9ef>class</span> <span styl [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>  <span style=color:#a6e22e>@Override</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>protected</span> Table <span style=color:#a6e22e>findTable</span><span style=color:#f92672>(</span>DataSourceOptions options<span style=color:#f92672>,</span> Configuration conf<span style=color:#f92672>)</span> <span style=color:#f92672>{</span>
+</span></span><span style=display:flex><span>    Optional<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>&gt;</span> path <span style=color:#f92672>=</span> options<span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    Preconditions<span style=color:#f92672>.</span><span style=color:#a6e22e>checkArgument</span><span style=color:#f92672>(</span>path<span style=color:#f92672>.</span><span style=color:#a6e22e>isPresent</span><span style=color:#f92672>(),</span> <span style=color:#e6db74>&#34;Cannot open table: path is not set&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    <span style=color:#75715e>// Read table from CustomCatalog
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    CustomCatalog catalog <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> CustomCatalog<span style=color:#f92672>(</span>conf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>    TableIdentifier tableIdentifier <span style=color:#f92672>=</span> TableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>parse</span><span style=color:#f92672>(</span>path<span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>());</span>
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>return</span> catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>loadTable</span><span style=color:#f92672>(</span>tableIdentifier<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>}</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>}</span>
+</span></span></code></pre></div><p>Register the <code>CustomIcebergSource</code> by updating <code>META-INF/services/org.apache.spark.sql.sources.DataSourceRegister</code> with its fully qualified name.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><ul><li><a href=#custom-table-operations-implementation>Custom table operations implementation</a></li><li><a href=#custom-catalog-implementation>Custom catalog implementation</a></li><li><a href=#custom [...]
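+<p>For illustration, the service registration file is a plain text file listing the implementation&rsquo;s fully qualified class name, one per line. A minimal sketch, assuming the hypothetical package <code>com.my.source</code>:</p>
+<pre><code># contents of META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
+# (the package name com.my.source is an assumption for this example)
+com.my.source.CustomIcebergSource</code></pre>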
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/dell/index.html b/docs/1.1.0/dell/index.html
new file mode 100644
index 00000000..ff4f701b
--- /dev/null
+++ b/docs/1.1.0/dell/index.html
@@ -0,0 +1,63 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Dell</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css rel= [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class="collapse in"><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a id=active href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=iceberg-dell-integration>Iceberg Dell Integration</h1>< [...]
+</span></span><span style=display:flex><span>SPARK_VERSION<span style=color:#f92672>=</span>3.2_2.12
+</span></span><span style=display:flex><span>ECS_CLIENT_VERSION<span style=color:#f92672>=</span>3.3.2
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DEPENDENCIES<span style=color:#f92672>=</span><span style=color:#e6db74>&#34;org.apache.iceberg:iceberg-spark-runtime-</span><span style=color:#e6db74>${</span>SPARK_VERSION<span style=color:#e6db74>}</span><span style=color:#e6db74>:</span><span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span><span style=color:#e6db74>,\
+</span></span></span><span style=display:flex><span><span style=color:#e6db74>org.apache.iceberg:iceberg-dell:</span><span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span><span style=color:#e6db74>,\
+</span></span></span><span style=display:flex><span><span style=color:#e6db74>com.emc.ecs:object-client-bundle:</span><span style=color:#e6db74>${</span>ECS_CLIENT_VERSION<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>spark-sql --packages <span style=color:#e6db74>${</span>DEPENDENCIES<span style=color:#e6db74>}</span> <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.extensions<span style=color:#f92672>=</span>org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.warehouse<span style=color:#f92672>=</span>ecs://bucket-a/namespace-a <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.catalog-impl<span style=color:#f92672>=</span>org.apache.iceberg.dell.ecs.EcsCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.ecs.s3.endpoint<span style=color:#f92672>=</span>http://10.x.x.x:9020 <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.ecs.s3.access-key-id<span style=color:#f92672>=</span>&lt;Your-ecs-s3-access-key&gt; <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.ecs.s3.secret-access-key<span style=color:#f92672>=</span>&lt;Your-ecs-s3-secret-access-key&gt;
+</span></span></code></pre></div><p>Then, use <code>my_catalog</code> to access the data in ECS. You can use <code>SHOW NAMESPACES IN my_catalog</code> and <code>SHOW TABLES IN my_catalog</code> to fetch the namespaces and tables of the catalog.</p><p>Known issues with catalog usage:</p><ol><li><code>SparkSession.catalog</code> cannot access third-party Spark catalogs from either Python or Scala, so use SQL DDL statements to list all tables and namespaces instead.</li></ol><h3 id=fli [...]
+</span></span><span style=display:flex><span>export HADOOP_CLASSPATH<span style=color:#f92672>=</span><span style=color:#e6db74>`</span>$HADOOP_HOME/bin/hadoop classpath<span style=color:#e6db74>`</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># download Iceberg dependency</span>
+</span></span><span style=display:flex><span>MAVEN_URL<span style=color:#f92672>=</span>https://repo1.maven.org/maven2
+</span></span><span style=display:flex><span>ICEBERG_VERSION<span style=color:#f92672>=</span>0.15.0
+</span></span><span style=display:flex><span>FLINK_VERSION<span style=color:#f92672>=</span>1.14
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>MAVEN_URL<span style=color:#e6db74>}</span>/org/apache/iceberg/iceberg-flink-runtime-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>/iceberg-flink-runtime-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>ICEBERG_VERSIO [...]
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>MAVEN_URL<span style=color:#e6db74>}</span>/org/apache/iceberg/iceberg-dell/<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>/iceberg-dell-<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>.jar
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># download ECS object client</span>
+</span></span><span style=display:flex><span>ECS_CLIENT_VERSION<span style=color:#f92672>=</span>3.3.2
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>MAVEN_URL<span style=color:#e6db74>}</span>/com/emc/ecs/object-client-bundle/<span style=color:#e6db74>${</span>ECS_CLIENT_VERSION<span style=color:#e6db74>}</span>/object-client-bundle-<span style=color:#e6db74>${</span>ECS_CLIENT_VERSION<span style=color:#e6db74>}</span>.jar
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># open the SQL client.</span>
+</span></span><span style=display:flex><span>/path/to/bin/sql-client.sh embedded <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j iceberg-flink-runtime-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j iceberg-dell-<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j object-client-bundle-<span style=color:#e6db74>${</span>ECS_CLIENT_VERSION<span style=color:#e6db74>}</span>.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    shell
+</span></span></code></pre></div><p>Then, use Flink SQL to create a catalog named <code>my_catalog</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-SQL data-lang=SQL><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>CATALOG</span> my_catalog <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;warehouse&#39;</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;ecs://bucket-a/namespace-a&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;org.apache.iceberg.dell.ecs.EcsCatalog&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;ecs.s3.endpoint&#39;</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;http://10.x.x.x:9020&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;ecs.s3.access-key-id&#39;</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;&lt;Your-ecs-s3-access-key&gt;&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;ecs.s3.secret-access-key&#39;</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;&lt;Your-ecs-s3-secret-access-key&gt;&#39;</span>)
+</span></span></code></pre></div><p>Then, you can run <code>USE CATALOG my_catalog</code>, <code>SHOW DATABASES</code>, and <code>SHOW TABLES</code> to fetch the namespaces and tables of the catalog.</p><h3 id=limitations>Limitations</h3><p>When you use the catalog with Dell ECS, be aware of the following limitations:</p><ol><li><code>RENAME</code> statements are supported, but without additional protections. When you rename a table, you must guarantee that all commits are finished in [...]
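+<p>As a minimal sketch of browsing the catalog from the Flink SQL client (the database name <code>namespace_a</code> below is hypothetical):</p>
+<pre><code>USE CATALOG my_catalog;
+SHOW DATABASES;
+-- namespace_a is an assumed database under the warehouse ecs://bucket-a/namespace-a
+USE namespace_a;
+SHOW TABLES;</code></pre>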
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/docssearch.json b/docs/1.1.0/docssearch.json
index 0637a088..040ab524 100644
--- a/docs/1.1.0/docssearch.json
+++ b/docs/1.1.0/docssearch.json
@@ -1 +1 @@
-[]
\ No newline at end of file
+[{"categories":null,"content":" Getting Started The latest version of Iceberg is 1.0.0.\nSpark is currently the most feature-rich compute engine for Iceberg operations. We recommend you to get started with Spark to understand Iceberg concepts and features with examples. You can also view documentations of using Iceberg with other compute engine under the Engines tab.\nUsing Iceberg in Spark 3 To use Iceberg in a Spark shell, use the --packages option:\nspark-shell --packages org.apache.i [...]
\ No newline at end of file
diff --git a/docs/1.1.0/evolution/index.html b/docs/1.1.0/evolution/index.html
new file mode 100644
index 00000000..8ca57672
--- /dev/null
+++ b/docs/1.1.0/evolution/index.html
@@ -0,0 +1,37 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Evolution</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a id=active href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=evolution>Evolution</h1><p>Iceberg supports <strong>in- [...]
+<em>The data for 2008 is partitioned by month. Starting in 2009, the table is updated so that the data is instead partitioned by day. Both partitioning layouts can coexist in the same table.</em></p><p>Iceberg uses <a href=../partitioning>hidden partitioning</a>, so you don&rsquo;t <em>need</em> to write queries for a specific partition layout to be fast. Instead, you can write queries that select the data you need, and Iceberg automatically prunes out files that don&rsquo;t con [...]
+For example, the following code could be used to update the partition spec to add a new partition field that places <code>id</code> column values into 8 buckets and to remove the existing partition field <code>category</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>Table sampleTable <span style=color:#f92672>=</span> <span style=color: [...]
+</span></span><span style=display:flex><span>sampleTable<span style=color:#f92672>.</span><span style=color:#a6e22e>updateSpec</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>addField</span><span style=color:#f92672>(</span>bucket<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>8</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>removeField</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;category&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>Spark supports updating the partition spec through its <code>ALTER TABLE</code> SQL statement; see more details in <a href=../spark-ddl/#alter-table--add-partition-field>Spark SQL</a>.</p><h2 id=sort-order-evolution>Sort order evolution</h2><p>Like the partition spec, the Iceberg sort order can also be updated in an existing table.
+When you evolve a sort order, the old data written in an earlier order remains unchanged.
+Engines can always choose to write data in the latest sort order, or unsorted when sorting is prohibitively expensive.</p><p>Iceberg&rsquo;s Java table API provides the <code>replaceSortOrder</code> API to update the sort order.
+For example, the following code could be used to create a new sort order
+with <code>id</code> column sorted in ascending order with nulls last,
+and <code>category</code> column sorted in descending order with nulls first:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>Table sampleTable <span style=color:#f92672>=</span> <span style=color:#f92672>...;</span>
+</span></span><span style=display:flex><span>sampleTable<span style=color:#f92672>.</span><span style=color:#a6e22e>replaceSortOrder</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>   <span style=color:#f92672>.</span><span style=color:#a6e22e>asc</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;id&#34;</span><span style=color:#f92672>,</span> NullOrder<span style=color:#f92672>.</span><span style=color:#a6e22e>NULLS_LAST</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>   <span style=color:#f92672>.</span><span style=color:#a6e22e>desc</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;category&#34;</span><span style=color:#f92672>,</span> NullOrder<span style=color:#f92672>.</span><span style=color:#a6e22e>NULLS_FIRST</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>   <span style=color:#f92672>.</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>Spark supports updating the sort order through its <code>ALTER TABLE</code> SQL statement; see more details in <a href=../spark-ddl/#alter-table--write-ordered-by>Spark SQL</a>.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#schema-evolution>Schema evolution</a><ul><li><a href=#correctness>Correctness</a></li></ul></li><li><a href=#partition-evolution>Partition evolution</a></li><li><a href=#sort-order-evolut [...]
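+<p>As a sketch of the Spark SQL equivalents of the two Java snippets above (the table name <code>prod.db.sample</code> is illustrative; see the <a href=../spark-ddl/>Spark DDL</a> page for the authoritative syntax and the required SQL extensions):</p>
+<pre><code>-- partition evolution: bucket id values into 8 buckets, drop the category field
+ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(8, id);
+ALTER TABLE prod.db.sample DROP PARTITION FIELD category;
+-- sort order evolution: id ascending with nulls last, category descending with nulls first
+ALTER TABLE prod.db.sample WRITE ORDERED BY id ASC NULLS LAST, category DESC NULLS FIRST;</code></pre>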
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/flink-connector/index.html b/docs/1.1.0/flink-connector/index.html
new file mode 100644
index 00000000..37cd5618
--- /dev/null
+++ b/docs/1.1.0/flink-connector/index.html
@@ -0,0 +1,85 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Flink Connector</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.m [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-parent=full href=#Flink><spa [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class="collapse in"><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a id=active href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li> [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=flink-connector>Flink Connector</h1><p>Apache Flink sup [...]
+which simply maps to the underlying iceberg table instead of maintaining the iceberg table directly in the current Flink catalog.</p><p>To create a table in Flink SQL using the syntax <code>CREATE TABLE test (..) WITH ('connector'='iceberg', ...)</code>, the Flink iceberg connector provides the following table properties:</p><ul><li><code>connector</code>: Use the constant <code>iceberg</code>.</li><li><code>catalog-name</code>: User-specified catalog name. It&rsquo;s required because the c [...]
+</span></span><span style=display:flex><span>    id   BIGINT,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;connector&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-name&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive_prod&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;uri&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;thrift://localhost:9083&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/path/to/warehouse&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>If you want to create a Flink table mapping to a different iceberg table managed in the Hive catalog (such as <code>hive_db.hive_iceberg_table</code> in Hive), then you can create the Flink table as follows:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color: [...]
+</span></span><span style=display:flex><span>    id   BIGINT,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;connector&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-name&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive_prod&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-database&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive_db&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-table&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive_iceberg_table&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;uri&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;thrift://localhost:9083&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/path/to/warehouse&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><div class=info>The underlying catalog database (<code>hive_db</code> in the above example) will be created automatically if it does not exist when writing records into the Flink table.</div><h2 id=table-managed-in-hadoop-catalog>Table managed in hadoop catalog</h2><p>The following SQL will create a Flink table in the current Flink catalog, which maps to the iceberg table <code>default_database.flink_table</code> managed in a hadoop catalog.</p><div class=highl [...]
+</span></span><span style=display:flex><span>    id   BIGINT,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;connector&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-name&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hadoop_prod&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hadoop&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/path/to/warehouse&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><h2 id=table-managed-in-custom-catalog>Table managed in custom catalog</h2><p>The following SQL will create a Flink table in the current Flink catalog, which maps to the iceberg table <code>default_database.flink_table</code> managed in
+a custom catalog of type <code>com.my.custom.CatalogImpl</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> flink_table (
+</span></span><span style=display:flex><span>    id   BIGINT,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;connector&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-name&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;custom_prod&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;com.my.custom.CatalogImpl&#39;</span>,
+</span></span><span style=display:flex><span>     <span style=color:#75715e>-- More table properties for the customized catalog
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#e6db74>&#39;my-additional-catalog-config&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;my-value&#39;</span>,
+</span></span><span style=display:flex><span>     ...
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>Please check the sections under the Integrations tab for all custom catalogs.</p><h2 id=a-complete-example>A complete example</h2><p>Take the Hive catalog as an example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> flink_table (
+</span></span><span style=display:flex><span>    id   BIGINT,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;connector&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;catalog-name&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive_prod&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;uri&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;thrift://localhost:9083&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;file:///path/to/warehouse&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> flink_table <span style=color:#66d9ef>VALUES</span> (<span style=color:#ae81ff>1</span>, <span style=color:#e6db74>&#39;AAA&#39;</span>), (<span style=color:#ae81ff>2</span>, <span style=color:#e6db74>&#39;BBB&#39;</span>), (<span style=color:#ae81ff>3</span>, <span style=color:#e6db74>&#39;CCC&#39;</span>);
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> execution.<span style=color:#66d9ef>result</span><span style=color:#f92672>-</span><span style=color:#66d9ef>mode</span><span style=color:#f92672>=</span>tableau;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> flink_table;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#f92672>+</span><span style=color:#75715e>----+------+
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#f92672>|</span> id <span style=color:#f92672>|</span> <span style=color:#66d9ef>data</span> <span style=color:#f92672>|</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>+</span><span style=color:#75715e>----+------+
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#f92672>|</span>  <span style=color:#ae81ff>1</span> <span style=color:#f92672>|</span>  AAA <span style=color:#f92672>|</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>|</span>  <span style=color:#ae81ff>2</span> <span style=color:#f92672>|</span>  BBB <span style=color:#f92672>|</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>|</span>  <span style=color:#ae81ff>3</span> <span style=color:#f92672>|</span>  CCC <span style=color:#f92672>|</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>+</span><span style=color:#75715e>----+------+
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#ae81ff>3</span> <span style=color:#66d9ef>rows</span> <span style=color:#66d9ef>in</span> <span style=color:#66d9ef>set</span>
+</span></span></code></pre></div><p>For more details, please refer to the Iceberg <a href=../flink>Flink documentation</a>.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#table-managed-in-hive-catalog>Table managed in Hive catalog.</a></li><li><a href=#table-managed-in-hadoop-catalog>Table managed in hadoop catalog</a></li><li><a href=#table-managed-in-custom-catalog>Table managed in custom catalog</a></li><li><a href=#a-complete-example>A compl [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/flink/flink-connector/index.html b/docs/1.1.0/flink/flink-connector/index.html
new file mode 100644
index 00000000..c9452ac5
--- /dev/null
+++ b/docs/1.1.0/flink/flink-connector/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/flink-connector/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/flink-connector/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/flink-connector/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/flink/index.html b/docs/1.1.0/flink/index.html
new file mode 100644
index 00000000..2a228590
--- /dev/null
+++ b/docs/1.1.0/flink/index.html
@@ -0,0 +1,335 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Enabling Iceberg in Flink</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-parent=full href=#Flink><spa [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class="collapse in"><ul class=sub-menu><li><a id=active href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li> [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=flink>Flink</h1><p>Apache Iceberg supports both <a href [...]
+</span></span><span style=display:flex><span>SCALA_VERSION<span style=color:#f92672>=</span>2.12
+</span></span><span style=display:flex><span>APACHE_FLINK_URL<span style=color:#f92672>=</span>archive.apache.org/dist/flink/
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>APACHE_FLINK_URL<span style=color:#e6db74>}</span>/flink-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>/flink-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>-bin-scala_<span style=color:#e6db74>${</span>SCALA_VERSION<span style=color:#e6db74>}</span>.tgz
+</span></span><span style=display:flex><span>tar xzvf flink-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>-bin-scala_<span style=color:#e6db74>${</span>SCALA_VERSION<span style=color:#e6db74>}</span>.tgz
+</span></span></code></pre></div><p>Step 2: Start a standalone Flink cluster within the Hadoop environment.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-bash data-lang=bash><span style=display:flex><span><span style=color:#75715e># HADOOP_HOME is your hadoop root directory after unpacking the binary package.</span>
+</span></span><span style=display:flex><span>export HADOOP_CLASSPATH<span style=color:#f92672>=</span><span style=color:#e6db74>`</span>$HADOOP_HOME/bin/hadoop classpath<span style=color:#e6db74>`</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># Start the flink standalone cluster</span>
+</span></span><span style=display:flex><span>./bin/start-cluster.sh
+</span></span></code></pre></div><p>Step 3: Start the Flink SQL client.</p><p>We&rsquo;ve created a separate <code>flink-runtime</code> module in the iceberg project to generate a bundled jar, which can be loaded by the Flink SQL client directly.</p><p>To build the <code>flink-runtime</code> bundled jar manually, build the <code>iceberg</code> project; it will generate the jar under <code>&lt;iceberg-root-dir>/flink-runtime/build/libs</code>. Of course, we could also d [...]
+</span></span><span style=display:flex><span>export HADOOP_CLASSPATH<span style=color:#f92672>=</span><span style=color:#e6db74>`</span>$HADOOP_HOME/bin/hadoop classpath<span style=color:#e6db74>`</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>./bin/sql-client.sh embedded -j &lt;flink-runtime-directory&gt;/iceberg-flink-runtime-xxx.jar shell
+</span></span></code></pre></div><p>By default, iceberg includes hadoop jars for the hadoop catalog. To use the hive catalog, the hive jars must be loaded when opening the Flink SQL client. Fortunately, Apache Flink provides a <a href=https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-connector-hive-2.3.6_2.11/1.11.0/flink-sql-connector-hive-2.3.6_2.11-1.11.0.jar>bundled hive jar</a> for the SQL client, so we can open the SQL client
+as follows:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-bash data-lang=bash><span style=display:flex><span><span style=color:#75715e># HADOOP_HOME is your hadoop root directory after unpacking the binary package.</span>
+</span></span><span style=display:flex><span>export HADOOP_CLASSPATH<span style=color:#f92672>=</span><span style=color:#e6db74>`</span>$HADOOP_HOME/bin/hadoop classpath<span style=color:#e6db74>`</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># download Iceberg dependency</span>
+</span></span><span style=display:flex><span>ICEBERG_VERSION<span style=color:#f92672>=</span>0.11.1
+</span></span><span style=display:flex><span>MAVEN_URL<span style=color:#f92672>=</span>https://repo1.maven.org/maven2
+</span></span><span style=display:flex><span>ICEBERG_MAVEN_URL<span style=color:#f92672>=</span><span style=color:#e6db74>${</span>MAVEN_URL<span style=color:#e6db74>}</span>/org/apache/iceberg
+</span></span><span style=display:flex><span>ICEBERG_PACKAGE<span style=color:#f92672>=</span>iceberg-flink-runtime
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>ICEBERG_MAVEN_URL<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>ICEBERG_PACKAGE<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>ICEBERG_PACKAGE<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>.jar
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># download the flink-sql-connector-hive-${HIVE_VERSION}_${SCALA_VERSION}-${FLINK_VERSION}.jar</span>
+</span></span><span style=display:flex><span>HIVE_VERSION<span style=color:#f92672>=</span>2.3.6
+</span></span><span style=display:flex><span>SCALA_VERSION<span style=color:#f92672>=</span>2.11
+</span></span><span style=display:flex><span>FLINK_VERSION<span style=color:#f92672>=</span>1.11.0
+</span></span><span style=display:flex><span>FLINK_CONNECTOR_URL<span style=color:#f92672>=</span><span style=color:#e6db74>${</span>MAVEN_URL<span style=color:#e6db74>}</span>/org/apache/flink
+</span></span><span style=display:flex><span>FLINK_CONNECTOR_PACKAGE<span style=color:#f92672>=</span>flink-sql-connector-hive
+</span></span><span style=display:flex><span>wget <span style=color:#e6db74>${</span>FLINK_CONNECTOR_URL<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>FLINK_CONNECTOR_PACKAGE<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>HIVE_VERSION<span style=color:#e6db74>}</span>_<span style=color:#e6db74>${</span>SCALA_VERSION<span style=color:#e6db74>}</span>/<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>/<span style= [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e># open the SQL client.</span>
+</span></span><span style=display:flex><span>/path/to/bin/sql-client.sh embedded <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j <span style=color:#e6db74>${</span>ICEBERG_PACKAGE<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>ICEBERG_VERSION<span style=color:#e6db74>}</span>.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    -j <span style=color:#e6db74>${</span>FLINK_CONNECTOR_PACKAGE<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>HIVE_VERSION<span style=color:#e6db74>}</span>_<span style=color:#e6db74>${</span>SCALA_VERSION<span style=color:#e6db74>}</span>-<span style=color:#e6db74>${</span>FLINK_VERSION<span style=color:#e6db74>}</span>.jar <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    shell
+</span></span></code></pre></div><h2 id=preparation-when-using-flinks-python-api>Preparation when using Flink&rsquo;s Python API</h2><p>Install the Apache Flink dependency using <code>pip</code></p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-python data-lang=python><span style=display:flex><span>pip install apache<span style=color:#f92672>-</span>flink<span style=color:#f92672>==</span><sp [...]
+</span></span></code></pre></div><p>In order for <code>pyflink</code> to function properly, it needs to have access to all Hadoop jars. For <code>pyflink</code>
+we need to copy those Hadoop jars to the installation directory of <code>pyflink</code>, which can be found under
+<code>&lt;PYTHON_ENV_INSTALL_DIR>/site-packages/pyflink/lib/</code> (see also a mention of this on
+the <a href=http://mail-archives.apache.org/mod_mbox/flink-user/202105.mbox/%3C3D98BDD2-89B1-42F5-B6F4-6C06A038F978%40gmail.com%3E>Flink ML</a>).
+We can use the following short Python script to copy all Hadoop jars (you need to make sure that <code>HADOOP_HOME</code>
+points to your Hadoop installation):</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-python data-lang=python><span style=display:flex><span><span style=color:#f92672>import</span> os
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> shutil
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> site
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>def</span> <span style=color:#a6e22e>copy_all_hadoop_jars_to_pyflink</span>():
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>if</span> <span style=color:#f92672>not</span> os<span style=color:#f92672>.</span>getenv(<span style=color:#e6db74>&#34;HADOOP_HOME&#34;</span>):
+</span></span><span style=display:flex><span>        <span style=color:#66d9ef>raise</span> <span style=color:#a6e22e>Exception</span>(<span style=color:#e6db74>&#34;The HADOOP_HOME env var must be set and point to a valid Hadoop installation&#34;</span>)
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    jar_files <span style=color:#f92672>=</span> []
+</span></span><span style=display:flex><span>
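+</span></span><span style=display:flex><span>    <span style=color:#75715e># locate the lib directory of the installed pyflink package inside site-packages</span>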
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>def</span> <span style=color:#a6e22e>find_pyflink_lib_dir</span>():
+</span></span><span style=display:flex><span>        <span style=color:#66d9ef>for</span> dir <span style=color:#f92672>in</span> site<span style=color:#f92672>.</span>getsitepackages():
+</span></span><span style=display:flex><span>            package_dir <span style=color:#f92672>=</span> os<span style=color:#f92672>.</span>path<span style=color:#f92672>.</span>join(dir, <span style=color:#e6db74>&#34;pyflink&#34;</span>, <span style=color:#e6db74>&#34;lib&#34;</span>)
+</span></span><span style=display:flex><span>            <span style=color:#66d9ef>if</span> os<span style=color:#f92672>.</span>path<span style=color:#f92672>.</span>exists(package_dir):
+</span></span><span style=display:flex><span>                <span style=color:#66d9ef>return</span> package_dir
+</span></span><span style=display:flex><span>        <span style=color:#66d9ef>return</span> <span style=color:#66d9ef>None</span>
+</span></span><span style=display:flex><span>
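+</span></span><span style=display:flex><span>    <span style=color:#75715e># walk HADOOP_HOME recursively and collect every jar file</span>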
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>for</span> root, _, files <span style=color:#f92672>in</span> os<span style=color:#f92672>.</span>walk(os<span style=color:#f92672>.</span>getenv(<span style=color:#e6db74>&#34;HADOOP_HOME&#34;</span>)):
+</span></span><span style=display:flex><span>        <span style=color:#66d9ef>for</span> file <span style=color:#f92672>in</span> files:
+</span></span><span style=display:flex><span>            <span style=color:#66d9ef>if</span> file<span style=color:#f92672>.</span>endswith(<span style=color:#e6db74>&#34;.jar&#34;</span>):
+</span></span><span style=display:flex><span>                jar_files<span style=color:#f92672>.</span>append(os<span style=color:#f92672>.</span>path<span style=color:#f92672>.</span>join(root, file))
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    pyflink_lib_dir <span style=color:#f92672>=</span> find_pyflink_lib_dir()
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>    num_jar_files <span style=color:#f92672>=</span> len(jar_files)
+</span></span><span style=display:flex><span>    print(<span style=color:#e6db74>f</span><span style=color:#e6db74>&#34;Copying </span><span style=color:#e6db74>{</span>num_jar_files<span style=color:#e6db74>}</span><span style=color:#e6db74> Hadoop jar files to pyflink&#39;s lib directory at </span><span style=color:#e6db74>{</span>pyflink_lib_dir<span style=color:#e6db74>}</span><span style=color:#e6db74>&#34;</span>)
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>for</span> jar <span style=color:#f92672>in</span> jar_files:
+</span></span><span style=display:flex><span>        shutil<span style=color:#f92672>.</span>copy(jar, pyflink_lib_dir)
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>if</span> __name__ <span style=color:#f92672>==</span> <span style=color:#e6db74>&#39;__main__&#39;</span>:
+</span></span><span style=display:flex><span>    copy_all_hadoop_jars_to_pyflink()
+</span></span></code></pre></div><p>Once the script has finished, you should see output similar to</p><pre tabindex=0><code>Copying 645 Hadoop jar files to pyflink&#39;s lib directory at &lt;PYTHON_DIR&gt;/lib/python3.8/site-packages/pyflink/lib
+</code></pre><p>Now we need to provide a <code>file://</code> path to the <code>iceberg-flink-runtime</code> jar, which we can either get by building the project
+and looking at <code>&lt;iceberg-root-dir>/flink-runtime/build/libs</code>, or downloading it from the <a href=https://repo.maven.apache.org/maven2/org/apache/iceberg/iceberg-flink-runtime/>Apache official repository</a>.
+Third-party libs can be added to <code>pyflink</code> via <code>env.add_jars("file:///my/jar/path/connector.jar")</code> / <code>table_env.get_config().get_configuration().set_string("pipeline.jars", "file:///my/jar/path/connector.jar")</code>, which is also mentioned in the official <a href=https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/python/dependency_management/>docs</a>.
+In our example we&rsquo;re using <code>env.add_jars(..)</code> as shown below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-python data-lang=python><span style=display:flex><span><span style=color:#f92672>import</span> os
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#f92672>from</span> pyflink.datastream <span style=color:#f92672>import</span> StreamExecutionEnvironment
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>env <span style=color:#f92672>=</span> StreamExecutionEnvironment<span style=color:#f92672>.</span>get_execution_environment()
+</span></span><span style=display:flex><span>iceberg_flink_runtime_jar <span style=color:#f92672>=</span> os<span style=color:#f92672>.</span>path<span style=color:#f92672>.</span>join(os<span style=color:#f92672>.</span>getcwd(), <span style=color:#e6db74>&#34;iceberg-flink-runtime-1.0.0.jar&#34;</span>)
+</span></span><span style=display:flex><span>
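+</span></span><span style=display:flex><span><span style=color:#75715e># add_jars expects file:// URLs, as constructed above</span>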
+</span></span><span style=display:flex><span>env<span style=color:#f92672>.</span>add_jars(<span style=color:#e6db74>&#34;file://</span><span style=color:#e6db74>{}</span><span style=color:#e6db74>&#34;</span><span style=color:#f92672>.</span>format(iceberg_flink_runtime_jar))
+</span></span></code></pre></div><p>Once we have reached this point, we can create a <code>StreamTableEnvironment</code> and execute Flink SQL statements.
+The below example shows how to create a custom catalog via the Python Table API:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-python data-lang=python><span style=display:flex><span><span style=color:#f92672>from</span> pyflink.table <span style=color:#f92672>import</span> StreamTableEnvironment
+</span></span><span style=display:flex><span>table_env <span style=color:#f92672>=</span> StreamTableEnvironment<span style=color:#f92672>.</span>create(env)
+</span></span><span style=display:flex><span>table_env<span style=color:#f92672>.</span>execute_sql(<span style=color:#e6db74>&#34;CREATE CATALOG my_catalog WITH (&#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;type&#39;=&#39;iceberg&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;catalog-impl&#39;=&#39;com.my.custom.CatalogImpl&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;my-additional-catalog-config&#39;=&#39;my-value&#39;)&#34;</span>)
+</span></span></code></pre></div><p>For more details, please refer to the <a href=https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/python/table/intro_to_table_api/>Python Table API</a>.</p><h2 id=creating-catalogs-and-using-catalogs>Creating and using catalogs</h2><p>Flink 1.11 supports creating catalogs using Flink SQL.</p><h3 id=catalog-configuration>Catalog Configuration</h3><p>A catalog is created and named by executing the following query (replace < [...]
+<code>&lt;config_key></code>=<code>&lt;config_value></code> with catalog implementation config):</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>CATALOG</span> <span style=color:#f92672>&lt;</span><span style=color:#66d9ef>catalog_name</span><span style=color:#f92672>&gt;</span [...]
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;&lt;config_key&gt;&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;&lt;config_value&gt;&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>The following properties can be set globally and are not limited to a specific catalog implementation:</p><ul><li><code>type</code>: Must be <code>iceberg</code>. (Required)</li><li><code>catalog-type</code>: <code>hive</code> or <code>hadoop</code> for built-in catalogs, or left unset for custom catalog implementations using <code>catalog-impl</code>. (Optional)</li><li><code>catalog-impl</code>: The fully-qualified class name of a custom catalog implementation. M [...]
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;catalog-type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;uri&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;thrift://localhost:9083&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;clients&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;5&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;property-version&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;1&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/warehouse/path&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>The following properties can be set if using the Hive catalog:</p><ul><li><code>uri</code>: The Hive metastore&rsquo;s thrift URI. (Required)</li><li><code>clients</code>: The Hive metastore client pool size, default value is 2. (Optional)</li><li><code>warehouse</code>: The Hive warehouse location, users should specify this path if neither set the <code>hive-conf-dir</code> to specify a location containing a <code>hive-site.xml</code> configuration fi [...]
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;catalog-type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hadoop&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/warehouse/path&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;property-version&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;1&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>The following properties can be set if using the Hadoop catalog:</p><ul><li><code>warehouse</code>: The HDFS directory to store metadata files and data files. (Required)</li></ul><p>Execute the SQL command <code>USE CATALOG hive_catalog</code> to set the current catalog.</p><h3 id=custom-catalog>Custom catalog</h3><p>Flink also supports loading a custom Iceberg <code>Catalog</code> implementation by specifying the <code>catalog-impl</code> pro [...]
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;catalog-impl&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;com.my.custom.CatalogImpl&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;my-additional-catalog-config&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;my-value&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><h3 id=create-through-yaml-config>Create through YAML config</h3><p>Catalogs can be registered in <code>sql-client-defaults.yaml</code> before starting the SQL client. Here is an example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-yaml data-lang=yaml><span style=display:flex><span><span style=color:#f92672>catalogs</span>: 
+</span></span><span style=display:flex><span>  - <span style=color:#f92672>name</span>: <span style=color:#ae81ff>my_catalog</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>type</span>: <span style=color:#ae81ff>iceberg</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>catalog-type</span>: <span style=color:#ae81ff>hadoop</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>warehouse</span>: <span style=color:#ae81ff>hdfs://nn:8020/warehouse/path</span>
+</span></span></code></pre></div><h3 id=create-through-sql-files>Create through SQL Files</h3><p>Since the <code>sql-client-defaults.yaml</code> file was removed in Flink 1.14, the SQL Client supports the <code>-i</code> startup option to execute an initialization SQL file that sets up the environment when starting the SQL Client.
+An example of such a file is presented below.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- define available catalogs
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>CATALOG</span> hive_catalog <span style=color:#66d9ef>WITH</span> (
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;iceberg&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;catalog-type&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hive&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;uri&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;thrift://localhost:9083&#39;</span>,
+</span></span><span style=display:flex><span>  <span style=color:#e6db74>&#39;warehouse&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hdfs://nn:8020/warehouse/path&#39;</span>
+</span></span><span style=display:flex><span>);
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>USE <span style=color:#66d9ef>CATALOG</span> hive_catalog;
+</span></span></code></pre></div><p>Use the <code>-i &lt;init.sql></code> option to initialize the SQL Client session:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-bash data-lang=bash><span style=display:flex><span>/path/to/bin/sql-client.sh -i /path/to/init.sql
+</span></span></code></pre></div><h2 id=ddl-commands>DDL commands</h2><h3 id=create-database><code>CREATE DATABASE</code></h3><p>By default, Iceberg uses the <code>default</code> database in Flink. Use the following example to create a separate database if you don&rsquo;t want to create tables under the <code>default</code> database:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql  [...]
+</span></span><span style=display:flex><span>USE iceberg_db;
+</span></span></code></pre></div><h3 id=create-table><code>CREATE TABLE</code></h3><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</span><span style=color:#66d [...]
+</span></span><span style=display:flex><span>    id BIGINT <span style=color:#66d9ef>COMMENT</span> <span style=color:#e6db74>&#39;unique id&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>);
+</span></span></code></pre></div><p>Table create commands support the most commonly used <a href=https://ci.apache.org/projects/flink/flink-docs-release-1.11/dev/table/sql/create.html#create-table>Flink create clauses</a>, including:</p><ul><li><code>PARTITION BY (column1, column2, ...)</code> to configure partitioning; Apache Flink does not yet support hidden partitioning.</li><li><code>COMMENT 'table document'</code> to set a table description.</li><li><code>WITH ('key'='value', .. [...]
+</span></span><span style=display:flex><span>    id BIGINT <span style=color:#66d9ef>COMMENT</span> <span style=color:#e6db74>&#39;unique id&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>) PARTITIONED <span style=color:#66d9ef>BY</span> (<span style=color:#66d9ef>data</span>);
+</span></span></code></pre></div><p>Apache Iceberg supports hidden partitioning, but Apache Flink doesn&rsquo;t support partitioning by a function on columns, so there is currently no way to declare hidden partitions through Flink DDL; this is expected to improve in future Flink versions.</p><h3 id=create-table-like><code>CREATE TABLE LIKE</code></h3><p>To create a table with the same schema, partitioning, and table properties as another table, use <code>CREATE TABLE LIKE</code>.</p><div class=highlight><pre tabi [...]
+</span></span><span style=display:flex><span>    id BIGINT <span style=color:#66d9ef>COMMENT</span> <span style=color:#e6db74>&#39;unique id&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> STRING
+</span></span><span style=display:flex><span>);
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span>  <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</span><span style=color:#66d9ef>default</span><span style=color:#f92672>`</span>.<span style=color:#f92672>`</span>sample_like<span style=color:#f92672>`</span> <span style=color:#66d9ef>LIKE</span> <span style=color:#f92672>`</span>hive_catalog<span style=c [...]
+</span></span></code></pre></div><p>For more details, refer to the <a href=https://ci.apache.org/projects/flink/flink-docs-release-1.11/dev/table/sql/create.html#create-table>Flink <code>CREATE TABLE</code> documentation</a>.</p><h3 id=alter-table><code>ALTER TABLE</code></h3><p>Iceberg only supports altering table properties in Flink 1.11.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language [...]
+</span></span></code></pre></div><h3 id=alter-table--rename-to><code>ALTER TABLE .. RENAME TO</code></h3><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</span>< [...]
+</span></span></code></pre></div><h3 id=drop-table><code>DROP TABLE</code></h3><p>To delete a table, run:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>DROP</span> <span style=color:#66d9ef>TABLE</span> <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</spa [...]
+</span></span></code></pre></div><h2 id=querying-with-sql>Querying with SQL</h2><p>Iceberg supports both streaming and batch reads in Flink. Execute the following SQL command to switch the execution mode from &lsquo;streaming&rsquo; to &lsquo;batch&rsquo;, and vice versa:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><spa [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> execution.runtime<span style=color:#f92672>-</span><span style=color:#66d9ef>mode</span> <span style=color:#f92672>=</span> streaming;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- Execute the flink job in batch mode for current session context
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> execution.runtime<span style=color:#f92672>-</span><span style=color:#66d9ef>mode</span> <span style=color:#f92672>=</span> batch;
+</span></span></code></pre></div><h3 id=flink-batch-read>Flink batch read</h3><p>To check all the rows in an Iceberg table by submitting a Flink <strong>batch</strong> job, execute the following statements:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- Execute the flink job in batch mode for curren [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> execution.runtime<span style=color:#f92672>-</span><span style=color:#66d9ef>mode</span> <span style=color:#f92672>=</span> batch;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> sample;
+</span></span></code></pre></div><h3 id=flink-streaming-read>Flink streaming read</h3><p>Iceberg supports processing incremental data in Flink streaming jobs, starting from a historical snapshot-id:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- Submit the flink job in streaming mode for the current session.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> execution.runtime<span style=color:#f92672>-</span><span style=color:#66d9ef>mode</span> <span style=color:#f92672>=</span> streaming;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- Enable this switch because the streaming read SQL below passes job options via Flink SQL hints.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> <span style=color:#66d9ef>table</span>.<span style=color:#66d9ef>dynamic</span><span style=color:#f92672>-</span><span style=color:#66d9ef>table</span><span style=color:#f92672>-</span><span style=color:#66d9ef>options</span>.enabled<span style=color:#f92672>=</span><span style=color:#66d9ef>true</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- Read all the records from the iceberg current snapshot, and then read incremental data starting from that snapshot.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> sample <span style=color:#75715e>/*+ OPTIONS(&#39;streaming&#39;=&#39;true&#39;, &#39;monitor-interval&#39;=&#39;1s&#39;)*/</span> ;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- Read all incremental data starting from the snapshot-id &#39;3821550127947089987&#39; (records from this snapshot will be excluded).
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> sample <span style=color:#75715e>/*+ OPTIONS(&#39;streaming&#39;=&#39;true&#39;, &#39;monitor-interval&#39;=&#39;1s&#39;, &#39;start-snapshot-id&#39;=&#39;3821550127947089987&#39;)*/</span> ;
+</span></span></code></pre></div><p>These options can be set in Flink SQL hint options for a streaming job:</p><ul><li><code>monitor-interval</code>: time interval for continuously monitoring newly committed data files (default value: &#39;10s&#39;).</li><li><code>start-snapshot-id</code>: the snapshot id that the streaming job starts from.</li></ul><h3 id=flip-27-source-for-sql>FLIP-27 source for SQL</h3><p>Here are the SQL settings for the FLIP-27 source, which is only available
+for Flink 1.14 or above. All other SQL settings and options
+documented above are applicable to the FLIP-27 source.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- Opt in to the FLIP-27 source. Default is false.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SET</span> <span style=color:#66d9ef>table</span>.<span style=color:#66d9ef>exec</span>.iceberg.use<span style=color:#f92672>-</span>flip27<span style=color:#f92672>-</span><span style=color:#66d9ef>source</span> <span style=color:#f92672>=</span> <span style=color:#66d9ef>true</span>;
+</span></span></code></pre></div><h2 id=writing-with-sql>Writing with SQL</h2><p>Iceberg supports both <code>INSERT INTO</code> and <code>INSERT OVERWRITE</code> in Flink 1.11.</p><h3 id=insert-into><code>INSERT INTO</code></h3><p>To append new data to a table with a Flink streaming job, use <code>INSERT INTO</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</span><span style=color:#66d9ef>default</span><span style=color:#f92672>`</span>.<span style=color:#f92672>`</span>sample<span style=color:#f92672>`</span> <span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span> <span style=color:#66d [...]
+</span></span></code></pre></div><h3 id=insert-overwrite><code>INSERT OVERWRITE</code></h3><p>To replace data in the table with the result of a query, use <code>INSERT OVERWRITE</code> in a batch job (Flink streaming jobs do not support <code>INSERT OVERWRITE</code>). Overwrites are atomic operations for Iceberg tables.</p><p>Partitions that have rows produced by the SELECT query will be replaced, for example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#2 [...]
+</span></span></code></pre></div><p>Iceberg also supports overwriting given partitions with the <code>SELECT</code> values:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> OVERWRITE <span style=color:#f92672>`</span>hive_catalog<span style=color:#f92672>`</span>.<span style=color:#f92672>`</span><span styl [...]
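+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- Illustrative sketch only: assuming a hypothetical table sample_multi partitioned by (category, data),</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- fixing just the leading partition column overwrites the remaining partition column dynamically.</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> OVERWRITE sample_multi PARTITION(category<span style=color:#f92672>=</span><span style=color:#e6db74>&#39;c1&#39;</span>) <span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span> <span style=color:#66d9ef>FROM</span> another_table;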
+</span></span></code></pre></div><p>For a partitioned Iceberg table, when all of the partition columns are set to a value in the <code>PARTITION</code> clause, the insert targets a static partition; when only a prefix of the partition columns is given a value in the <code>PARTITION</code> clause, the query result is written into a dynamic partition.
+For an unpartitioned Iceberg table, its data will be completely overwritten by <code>INSERT OVERWRITE</code>.</p><h3 id=upsert><code>UPSERT</code></h3><p>Iceberg supports <code>UPSERT</code> based on the primary key when writing data into the v2 table format. There are two ways to enable upsert.</p><ol><li>Enable <code>UPSERT</code> mode via the table-level property <code>write.upsert.enabled</code>. Here is an example SQL statement to set the table property when creating a table. It would be [...]
+  `id`  INT UNIQUE COMMENT &#39;unique id&#39;,
+  `data` STRING NOT NULL,
+ PRIMARY KEY(`id`) NOT ENFORCED
+) with (&#39;format-version&#39;=&#39;2&#39;, &#39;write.upsert.enabled&#39;=&#39;true&#39;);
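+-- note: upsert requires the v2 format (&#39;format-version&#39;=&#39;2&#39;) and a primary key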
+</code></pre><ol start=2><li>Enabling <code>UPSERT</code> mode using <code>upsert-enabled</code> in the <a href=#Write-options>write options</a> provides more flexibility than a table-level config. Note that you still need to use the v2 table format and specify the primary key when creating the table.</li></ol><pre tabindex=0><code>INSERT INTO tableName /*+ OPTIONS(&#39;upsert-enabled&#39;=&#39;true&#39;) */
+...
+</code></pre><div class=info>OVERWRITE and UPSERT can&rsquo;t be set together. In UPSERT mode, if the table is partitioned, the partition fields should be included in equality fields.</div><h2 id=reading-with-datastream>Reading with DataStream</h2><p>Iceberg supports streaming and batch reads via the Java API.</p><h3 id=batch-read>Batch Read</h3><p>This example reads all records from an Iceberg table and prints them to the stdout console in a Flink batch job:</p><div class=highlight><pre tabin [...]
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> batch <span style=color:#f92672>=</span> FlinkSource<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>env</span><span style=color:#f92672>(</span>env<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>streaming</span><span style=color:#f92672>(</span><span style=color:#66d9ef>false</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Print all records to stdout.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>batch<span style=color:#f92672>.</span><span style=color:#a6e22e>print</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Submit and execute this batch read job.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg Batch Read&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><h3 id=streaming-read>Streaming read</h3><p>This example reads incremental records starting from snapshot-id &lsquo;3821550127947089987&rsquo; and prints them to the stdout console in a Flink streaming job:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>StreamExecutionEnvironment env <span style=color:#f92672>=< [...]
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> stream <span style=color:#f92672>=</span> FlinkSource<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>env</span><span style=color:#f92672>(</span>env<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>streaming</span><span style=color:#f92672>(</span><span style=color:#66d9ef>true</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>startSnapshotId</span><span style=color:#f92672>(</span><span style=color:#ae81ff>3821550127947089987L</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Print all records to stdout.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>stream<span style=color:#f92672>.</span><span style=color:#a6e22e>print</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Submit and execute this streaming read job.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg Streaming Read&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>There are other options that can be set via the Java API; please see the <a href=../../../javadoc/1.0.0/org/apache/iceberg/flink/source/FlinkSource.html>FlinkSource#Builder</a>.</p><h2 id=reading-with-datastream-flip-27-source>Reading with DataStream (FLIP-27 source)</h2><p>The <a href=https://cwiki.apache.org/confluence/display/FLINK/FLIP-27%3A+Refactor+Source+Interface>FLIP-27 source interface</a>
+was introduced in Flink 1.12. It aims to solve several shortcomings of the old <code>SourceFunction</code>
+streaming source interface. It also unifies the source interfaces for both batch and streaming executions.
+Most source connectors (like Kafka, file) in the Flink repo have migrated to the FLIP-27 interface.
+Flink is planning to deprecate the old <code>SourceFunction</code> interface in the near future.</p><p>A FLIP-27 based Flink <code>IcebergSource</code> is added in the <code>iceberg-flink</code> module for Flink 1.14 or above.
+The FLIP-27 <code>IcebergSource</code> is currently an experimental feature.</p><h3 id=batch-read-1>Batch Read</h3><p>This example reads all records from an Iceberg table and prints them to the stdout console in a Flink batch job:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>StreamExecutionEnvironment env <span style=color:#f92672>=</span> [...]
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>IcebergSource<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> source <span style=color:#f92672>=</span> IcebergSource<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>assignerFactory</span><span style=color:#f92672>(</span><span style=color:#66d9ef>new</span> SimpleSplitAssignerFactory<span style=color:#f92672>())</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> batch <span style=color:#f92672>=</span> env<span style=color:#f92672>.</span><span style=color:#a6e22e>fromSource</span><span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>    source<span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>    WatermarkStrategy<span style=color:#f92672>.</span><span style=color:#a6e22e>noWatermarks</span><span style=color:#f92672>(),</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;My Iceberg Source&#34;</span><span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>    TypeInformation<span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span>RowData<span style=color:#f92672>.</span><span style=color:#a6e22e>class</span><span style=color:#f92672>));</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Print all records to stdout.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>batch<span style=color:#f92672>.</span><span style=color:#a6e22e>print</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Submit and execute this batch read job.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg Batch Read&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><h3 id=streaming-read-1>Streaming read</h3><p>This example starts the streaming read from the latest table snapshot (inclusive).
+Every 60 seconds, it polls the Iceberg table to discover new append-only snapshots.
+CDC read is not supported yet.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>StreamExecutionEnvironment env <span style=color:#f92672>=</span> StreamExecutionEnvironment<span style=color:#f92672>.</span><span style=color:#a6e22e>createLocalEnvironment</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>IcebergSource<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> source <span style=color:#f92672>=</span> IcebergSource<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>assignerFactory</span><span style=color:#f92672>(</span><span style=color:#66d9ef>new</span> SimpleSplitAssignerFactory<span style=color:#f92672>())</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>streaming</span><span style=color:#f92672>(</span><span style=color:#66d9ef>true</span><span style=color:#f92672>)</span>
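+</span></span><span style=display:flex><span>    <span style=color:#75715e>// start from the latest snapshot (inclusive) and poll for newly committed append snapshots</span>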
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>streamingStartingStrategy</span><span style=color:#f92672>(</span>StreamingStartingStrategy<span style=color:#f92672>.</span><span style=color:#a6e22e>INCREMENTAL_FROM_LATEST_SNAPSHOT</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>monitorInterval</span><span style=color:#f92672>(</span>Duration<span style=color:#f92672>.</span><span style=color:#a6e22e>ofSeconds</span><span style=color:#f92672>(</span><span style=color:#ae81ff>60</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> stream <span style=color:#f92672>=</span> env<span style=color:#f92672>.</span><span style=color:#a6e22e>fromSource</span><span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>    source<span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>    WatermarkStrategy<span style=color:#f92672>.</span><span style=color:#a6e22e>noWatermarks</span><span style=color:#f92672>(),</span>
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#34;My Iceberg Source&#34;</span><span style=color:#f92672>,</span>
+</span></span><span style=display:flex><span>    TypeInformation<span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span>RowData<span style=color:#f92672>.</span><span style=color:#a6e22e>class</span><span style=color:#f92672>));</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Print all records to stdout.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>stream<span style=color:#f92672>.</span><span style=color:#a6e22e>print</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Submit and execute this streaming read job.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg Streaming Read&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>There are other options that can be set via the Java API; please see the
+<a href=../../../javadoc/1.0.0/org/apache/iceberg/flink/source/IcebergSource.html>IcebergSource#Builder</a>.</p><h2 id=writing-with-datastream>Writing with DataStream</h2><p>Iceberg supports writing to an Iceberg table from different DataStream inputs.</p><h3 id=appending-data>Appending data</h3><p>Writing <code>DataStream&lt;RowData></code> and <code>DataStream&lt;Row></code> to the sink Iceberg table is supported natively.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;ba [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> input <span style=color:#f92672>=</span> <span style=color:#f92672>...</span> <span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>Configuration hadoopConf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>,</span> hadoopConf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>FlinkSink<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>(</span>input<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg DataStream&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>The Iceberg API also allows users to write a generic <code>DataStream&lt;T></code> to an Iceberg table; more examples can be found in this <a href=https://github.com/apache/iceberg/blob/master/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/sink/TestFlinkIcebergSink.java>unit test</a>.</p><h3 id=overwrite-data>Overwrite data</h3><p>To dynamically overwrite the data in an existing Iceberg table, set the <code>overwrite</code> flag in the FlinkSin [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> input <span style=color:#f92672>=</span> <span style=color:#f92672>...</span> <span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>Configuration hadoopConf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>,</span> hadoopConf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>FlinkSink<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>(</span>input<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
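+</span></span><span style=display:flex><span>    <span style=color:#75715e>// replace existing table data with the output of this stream instead of appending</span>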
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>overwrite</span><span style=color:#f92672>(</span><span style=color:#66d9ef>true</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg DataStream&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><h3 id=upsert-data>Upsert data</h3><p>To upsert data in an existing Iceberg table, set the <code>upsert</code> flag in the FlinkSink builder. The table must use the v2 table format and have a primary key.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>StreamExecutionEnvironment env <span style=color:#f926 [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>DataStream<span style=color:#f92672>&lt;</span>RowData<span style=color:#f92672>&gt;</span> input <span style=color:#f92672>=</span> <span style=color:#f92672>...</span> <span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>Configuration hadoopConf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>,</span> hadoopConf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>FlinkSink<span style=color:#f92672>.</span><span style=color:#a6e22e>forRowData</span><span style=color:#f92672>(</span>input<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>tableLoader</span><span style=color:#f92672>(</span>tableLoader<span style=color:#f92672>)</span>
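+</span></span><span style=display:flex><span>    <span style=color:#75715e>// requires a v2 format table with a primary key (see note above)</span>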
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>upsert</span><span style=color:#f92672>(</span><span style=color:#66d9ef>true</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>env<span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;Test Iceberg DataStream&#34;</span><span style=color:#f92672>);</span>
+</span></span></code></pre></div><div class=info>OVERWRITE and UPSERT can&rsquo;t be set together. In UPSERT mode, if the table is partitioned, the partition fields should be included in equality fields.</div><h2 id=write-options>Write options</h2><p>Flink write options are passed when configuring the FlinkSink, like this:</p><pre tabindex=0><code>FlinkSink.Builder builder = FlinkSink.forRow(dataStream, SimpleDataUtil.FLINK_SCHEMA)
+    .table(table)
+    .tableLoader(tableLoader)
+    .set(&#34;write-format&#34;, &#34;orc&#34;)
+    .set(FlinkWriteOptions.OVERWRITE_MODE, &#34;true&#34;);
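+    // set(...) accepts raw string keys as well as typed FlinkWriteOptions constants, as shown above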
+</code></pre><p>For Flink SQL, write options can be passed in via SQL hints like this:</p><pre tabindex=0><code>INSERT INTO tableName /*+ OPTIONS(&#39;upsert-enabled&#39;=&#39;true&#39;) */
+...
+</code></pre><table><thead><tr><th>Flink option</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>write-format</td><td>Table write.format.default</td><td>File format to use for this write operation; parquet, avro, or orc</td></tr><tr><td>target-file-size-bytes</td><td>As per table property</td><td>Overrides this table&rsquo;s write.target-file-size-bytes</td></tr><tr><td>upsert-enabled</td><td>Table write.upsert.enabled</td><td>Overrides this table&rsquo;s write.upsert. [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>TableLoader tableLoader <span style=color:#f92672>=</span> TableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>fromHadoopTable</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Table table <span style=color:#f92672>=</span> tableLoader<span style=color:#f92672>.</span><span style=color:#a6e22e>loadTable</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>RewriteDataFilesActionResult result <span style=color:#f92672>=</span> Actions<span style=color:#f92672>.</span><span style=color:#a6e22e>forTable</span><span style=color:#f92672>(</span>table<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>        <span style=color:#f92672>.</span><span style=color:#a6e22e>rewriteDataFiles</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>        <span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>();</span>
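+</span></span><span style=display:flex><span>        <span style=color:#75715e>// the action can be tuned with options (for example a target file size) before execute();</span>
+</span></span><span style=display:flex><span>        <span style=color:#75715e>// see the RewriteDataFilesAction javadoc linked below for the available options</span>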
+</span></span></code></pre></div><p>For more documentation on the options of the rewrite files action, please see <a href=../../../javadoc/1.0.0/org/apache/iceberg/flink/actions/RewriteDataFilesAction.html>RewriteDataFilesAction</a></p><h2 id=type-conversion>Type conversion</h2><p>Iceberg&rsquo;s integration for Flink automatically converts between Flink and Iceberg types. When writing to a table with types that are not supported by Flink, like UUID, Iceberg will accept and convert values from the  [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/getting-started/index.html b/docs/1.1.0/getting-started/index.html
new file mode 100644
index 00000000..cff37ad0
--- /dev/null
+++ b/docs/1.1.0/getting-started/index.html
@@ -0,0 +1,53 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Getting Started</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.m [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a id=active href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=getting-started>Getting Started</h1><p>The latest versi [...]
+We recommend getting started with Spark to understand Iceberg concepts and features with examples.
+You can also view documentation on using Iceberg with other compute engines under the <strong>Engines</strong> tab.</p><h2 id=using-iceberg-in-spark-3>Using Iceberg in Spark 3</h2><p>To use Iceberg in a Spark shell, use the <code>--packages</code> option:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sh data-lang=sh><span style=display:flex><span>spark-shell --packages org.apache.iceberg: [...]
+</span></span></code></pre></div><div class=info>If you want to include Iceberg in your Spark installation, add the <a href=spark-runtime-jar><code>iceberg-spark-runtime-3.2_2.12</code> Jar</a> to Spark&rsquo;s <code>jars</code> folder.</div><h3 id=adding-catalogs>Adding catalogs</h3><p>Iceberg comes with <a href=../spark-configuration#catalogs>catalogs</a> that enable SQL commands to manage tables and load them by name. Catalogs are configured using properties under <code>spark.sql.cata [...]
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.extensions<span style=color:#f92672>=</span>org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.spark_catalog<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkSessionCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.spark_catalog.type<span style=color:#f92672>=</span>hive <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.local<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.local.type<span style=color:#f92672>=</span>hadoop <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.local.warehouse<span style=color:#f92672>=</span>$PWD/warehouse
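+</span></span><span style=display:flex><span><span style=color:#75715e># &#34;local&#34; above is a Hadoop catalog that stores tables under $PWD/warehouse (explanatory comment)</span>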
+</span></span></code></pre></div><h3 id=creating-a-table>Creating a table</h3><p>To create your first Iceberg table in Spark, use the <code>spark-sql</code> shell or <code>spark.sql(...)</code> to run a <a href=../spark-ddl#create-table><code>CREATE TABLE</code></a> command:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#7 [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> <span style=color:#66d9ef>local</span>.db.<span style=color:#66d9ef>table</span> (id bigint, <span style=color:#66d9ef>data</span> string) <span style=color:#66d9ef>USING</span> iceberg
+</span></span></code></pre></div><p>Iceberg catalogs support the full range of SQL DDL commands, including:</p><ul><li><a href=../spark-ddl#create-table><code>CREATE TABLE ... PARTITIONED BY</code></a></li><li><a href=../spark-ddl#create-table--as-select><code>CREATE TABLE ... AS SELECT</code></a></li><li><a href=../spark-ddl#alter-table><code>ALTER TABLE</code></a></li><li><a href=../spark-ddl#drop-table><code>DROP TABLE</code></a></li></ul><h3 id=writing>Writing</h3><p>Once your table  [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> <span style=color:#66d9ef>local</span>.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span> <span style=color:#66d9ef>FROM</span> <span style=color:#66d9ef>source</span> <span style=color:#66d9ef>WHERE</span> <span style=color:#66d9ef>length</span>(<span style=color:#66d9ef>data</span>) <span styl [...]
+</span></span></code></pre></div><p>Iceberg also adds row-level SQL updates to Spark, <a href=../spark-writes#merge-into><code>MERGE INTO</code></a> and <a href=../spark-writes#delete-from><code>DELETE FROM</code></a>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span>MERGE <span style=color:#66d9ef>INTO</span> <span style=color:#66d9ef>local</ [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> MATCHED <span style=color:#66d9ef>THEN</span> <span style=color:#66d9ef>UPDATE</span> <span style=color:#66d9ef>SET</span> t.<span style=color:#66d9ef>count</span> <span style=color:#f92672>=</span> t.<span style=color:#66d9ef>count</span> <span style=color:#f92672>+</span> u.<span style=color:#66d9ef>count</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> <span style=color:#66d9ef>NOT</span> MATCHED <span style=color:#66d9ef>THEN</span> <span style=color:#66d9ef>INSERT</span> <span style=color:#f92672>*</span>
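+</span></span><span style=display:flex><span><span style=color:#75715e>-- conditional clauses are also supported, for example: WHEN MATCHED AND u.count IS NULL THEN DELETE (illustrative)</span>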
+</span></span></code></pre></div><p>Iceberg supports writing DataFrames using the new <a href=../spark-writes#writing-with-dataframes>v2 DataFrame write API</a>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>spark<span style=color:#f92672>.</span>table<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;source&#34;</span><sp [...]
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;local.db.table&#34;</span><span style=color:#f92672>).</span>append<span style=color:#f92672>()</span>
+</span></span></code></pre></div><p>The old <code>write</code> API is supported, but <em>not</em> recommended.</p><h3 id=reading>Reading</h3><p>To read with SQL, use an Iceberg table name in a <code>SELECT</code> query:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#66d9ef>count< [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>FROM</span> <span style=color:#66d9ef>local</span>.db.<span style=color:#66d9ef>table</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>GROUP</span> <span style=color:#66d9ef>BY</span> <span style=color:#66d9ef>data</span>
+</span></span></code></pre></div><p>SQL is also the recommended way to <a href=../spark-queries#inspecting-tables>inspect tables</a>. To view all of the snapshots in a table, use the <code>snapshots</code> metadata table:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> [...]
+</span></span></code></pre></div><pre tabindex=0><code>+-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+| committed_at            | snapshot_id    | parent_id | operation | manifest_list                                      | ... |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+| 2019-02-08 03:29:51.215 | 57897183625154 | null      | append    | s3://.../table/metadata/snap-57897183625154-1.avro | ... |
+|                         |                |           |           |                                                    | ... |
+|                         |                |           |           |                                                    | ... |
+| ...                     | ...            | ...       | ...       | ...                                                | ... |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+</code></pre><p><a href=../spark-queries#querying-with-dataframes>DataFrame reads</a> are supported and can now reference tables by name using <code>spark.table</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> df <span style=color:#66d9ef>=</span> spark<span style=color:#f92672>.</span>table<spa [...]
+</span></span><span style=display:flex><span>df<span style=color:#f92672>.</span>count<span style=color:#f92672>()</span>
+</span></span></code></pre></div><h3 id=next-steps>Next steps</h3><p>Next, you can learn more about Iceberg tables in Spark:</p><ul><li><a href=../spark-ddl>DDL commands</a>: <code>CREATE</code>, <code>ALTER</code>, and <code>DROP</code></li><li><a href=../spark-queries>Querying data</a>: <code>SELECT</code> queries and metadata tables</li><li><a href=../spark-writes>Writing data</a>: <code>INSERT INTO</code> and <code>MERGE INTO</code></li><li><a href=../spark-procedures>Maintaining tab [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/hive/index.html b/docs/1.1.0/hive/index.html
new file mode 100644
index 00000000..23cef0a8
--- /dev/null
+++ b/docs/1.1.0/hive/index.html
@@ -0,0 +1,150 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Hive</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css rel= [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a id=active href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li>< [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=hive>Hive</h1><p>Iceberg supports reading and writing I [...]
+a <a href=https://cwiki.apache.org/confluence/display/Hive/StorageHandlers>StorageHandler</a>.</p><h2 id=feature-support>Feature support</h2><p>Iceberg compatibility with Hive 2.x and Hive 3.1.2/3 supports the following features:</p><ul><li>Creating a table</li><li>Dropping a table</li><li>Reading a table</li><li>Inserting into a table (INSERT INTO)</li></ul><div class=warning>DML operations work only with the MapReduce execution engine.</div><p>With Hive version 4.0.0-alpha-1 and above,
+the Iceberg integration when using HiveCatalog supports the following additional features:</p><ul><li>Creating an Iceberg identity-partitioned table</li><li>Creating an Iceberg table with any partition spec, including the various transforms supported by Iceberg</li><li>Creating a table from an existing table (CTAS table)</li><li>Altering a table while keeping Iceberg and Hive schemas in sync</li><li>Altering the partition schema (updating columns)</li><li>Altering the partition schema by [...]
+Hive&rsquo;s classpath. These are provided by the <code>iceberg-hive-runtime</code> jar file. For example, if using the Hive shell, this
+can be achieved by issuing a statement like so:</p><pre tabindex=0><code>add jar /path/to/iceberg-hive-runtime.jar;
+</code></pre><p>There are many other ways to achieve this, including adding the jar file to Hive&rsquo;s auxiliary classpath so it is
+available by default. Please refer to Hive&rsquo;s documentation for more information.</p><h4 id=enabling-support>Enabling support</h4><p>If the Iceberg storage handler is not in Hive&rsquo;s classpath, then Hive cannot load or update the metadata for an Iceberg
+table when the storage handler is set. To avoid the appearance of broken tables in Hive, Iceberg will not add the
+storage handler to a table unless Hive support is enabled. The storage handler is kept in sync (added or removed) every
+time Hive engine support for the table is updated, i.e. turned on or off in the table properties. There are two ways to
+enable Hive support: globally in Hadoop Configuration and per-table using a table property.</p><h5 id=hadoop-configuration>Hadoop configuration</h5><p>To enable Hive support globally for an application, set <code>iceberg.engine.hive.enabled=true</code> in its Hadoop configuration.
+For example, setting this in the <code>hive-site.xml</code> loaded by Spark will enable the storage handler for all tables created
+by Spark.</p><div class=danger>Starting with Apache Iceberg <code>0.11.0</code>, when using Hive with Tez you also have to disable
+vectorization (<code>hive.vectorized.execution.enabled=false</code>).</div><h5 id=table-property-configuration>Table property configuration</h5><p>Alternatively, the property <code>engine.hive.enabled</code> can be set to <code>true</code> and added to the table properties when creating
+the Iceberg table. Here is an example of doing it programmatically:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>Catalog catalog<span style=color:#f92672>=...;</span>
+</span></span><span style=display:flex><span>    Map<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> tableProperties<span style=color:#f92672>=</span>Maps<span style=color:#f92672>.</span><span style=color:#a6e22e>newHashMap</span><span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>    tableProperties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>TableProperties<span style=color:#f92672>.</span><span style=color:#a6e22e>ENGINE_HIVE_ENABLED</span><span style=color:#f92672>,</span><span style=color:#e6db74>&#34;true&#34;</span><span style=color:#f92672>);</span> <span style=color:#75715e>// engine.hive.enabled=true
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>createTable</span><span style=color:#f92672>(</span>tableId<span style=color:#f92672>,</span>schema<span style=color:#f92672>,</span>spec<span style=color:#f92672>,</span>tableProperties<span style=color:#f92672>);</span>
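+</span></span><span style=display:flex><span>    <span style=color:#75715e>// tableId, schema and spec are assumed to be defined earlier in the application</span>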
+</span></span></code></pre></div><p>The table level configuration overrides the global Hadoop configuration.</p><h5 id=hive-on-tez-configuration>Hive on Tez configuration</h5><p>To use the Tez engine on Hive <code>3.1.2</code> or later, Tez needs to be upgraded to >= <code>0.10.1</code> which contains a necessary fix <a href=https://issues.apache.org/jira/browse/TEZ-4248>TEZ-4248</a>.</p><p>To use the Tez engine on Hive <code>2.3.x</code>, you will need to manually build Tez from the <co [...]
+backwards incompatibility issue with Tez <code>0.10.1</code>.</p><p>You will also need to set the following property in the Hive
+configuration: <code>tez.mrreader.config.update.properties=hive.io.file.readcolumn.names,hive.io.file.readcolumn.ids</code>.</p><h2 id=catalog-management>Catalog Management</h2><h3 id=global-hive-catalog>Global Hive catalog</h3><p>From the Hive engine&rsquo;s perspective, there is only one global data catalog that is defined in the Hadoop configuration in
+the runtime environment. In contrast, Iceberg supports multiple different data catalog types such as Hive, Hadoop, AWS
+Glue, or custom catalog implementations. Iceberg also allows loading a table directly based on its path in the file
+system. Those tables do not belong to any catalog. Users might want to read these cross-catalog and path-based tables
+through the Hive engine for use cases like joins.</p><p>To support this, a table in the Hive metastore can represent three different ways of loading an Iceberg table, depending
+on the table&rsquo;s <code>iceberg.catalog</code> property:</p><ol><li>The table will be loaded using a <code>HiveCatalog</code> that corresponds to the metastore configured in the Hive environment
+if no <code>iceberg.catalog</code> is set</li><li>The table will be loaded using a custom catalog if <code>iceberg.catalog</code> is set to a catalog name (see below)</li><li>The table can be loaded directly using the table&rsquo;s root location if <code>iceberg.catalog</code> is set
+to <code>location_based_table</code></li></ol><p>For cases 2 and 3 above, users can create an overlay of an Iceberg table in the Hive metastore, so that different table
+types can work together in the same Hive environment. See <a href=#create-external-table>CREATE EXTERNAL TABLE</a>
+and <a href=#create-table>CREATE TABLE</a> for more details.</p><h3 id=custom-iceberg-catalogs>Custom Iceberg catalogs</h3><p>To globally register different catalogs, set the following Hadoop configurations:</p><table><thead><tr><th>Config Key</th><th>Description</th></tr></thead><tbody><tr><td>iceberg.catalog.&lt;catalog_name>.type</td><td>type of catalog: <code>hive</code>, <code>hadoop</code>, or left unset if using a custom catalog</td></tr><tr><td>iceberg.catalog.&lt;catalog_name>.c [...]
+SET iceberg.catalog.another_hive.uri=thrift://example.com:9083;
+SET iceberg.catalog.another_hive.clients=10;
+SET iceberg.catalog.another_hive.warehouse=hdfs://example.com:8020/warehouse;
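+-- tables can then reference this catalog through the table property iceberg.catalog=another_hive (see CREATE EXTERNAL TABLE below)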
+</code></pre><p>Register a <code>HadoopCatalog</code> called <code>hadoop</code>:</p><pre tabindex=0><code>SET iceberg.catalog.hadoop.type=hadoop;
+SET iceberg.catalog.hadoop.warehouse=hdfs://example.com:8020/warehouse;
+</code></pre><p>Register an AWS <code>GlueCatalog</code> called <code>glue</code>:</p><pre tabindex=0><code>SET iceberg.catalog.glue.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog;
+SET iceberg.catalog.glue.warehouse=s3://my-bucket/my/key/prefix;
+SET iceberg.catalog.glue.lock.table=myGlueLockTable;
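+-- lock.table names a DynamoDB table used for commit locking; myGlueLockTable is an illustrative name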
+</code></pre><h2 id=ddl-commands>DDL Commands</h2><p>Not all the features below are supported with Hive 2.3.x and Hive 3.1.x. Please refer to the
+<a href=#feature-support>Feature support</a> paragraph for further details.</p><p>One generally applicable difference is that Hive 4.0.0-alpha-1 makes it possible to use
+<code>STORED BY ICEBERG</code> instead of the old <code>STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'</code></p><h3 id=create-table>CREATE TABLE</h3><h4 id=non-partitioned-tables>Non partitioned tables</h4><p>The Hive <code>CREATE EXTERNAL TABLE</code> command creates an Iceberg table when you specify the storage handler as follows:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class= [...]
+</span></span></code></pre></div><p>If you want to create external tables using CREATE TABLE, configure the MetaStoreMetadataTransformer on the cluster,
+so that <code>CREATE TABLE</code> commands are transformed to create external tables. For example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> x (i int) STORED <span style=color:#66d9ef>BY</span> ICEBERG;
+</span></span></code></pre></div><p>You can specify the default file format (Avro, Parquet, ORC) at the time of the table creation.
+The default is Parquet:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> x (i int) STORED <span style=color:#66d9ef>BY</span> ICEBERG STORED <span style=color:#66d9ef>AS</span> ORC;
+</span></span></code></pre></div><h4 id=partitioned-tables>Partitioned tables</h4><p>You can create Iceberg partitioned tables using a command familiar to those who create non-Iceberg tables:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> x (i int) PARTITIONED <sp [...]
+</span></span></code></pre></div><div class=info>The resulting table does not create partitions in HMS, but instead, converts partition data into Iceberg identity partitions.</div><p>Use the DESCRIBE command to get information about the Iceberg identity partitions:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>DESC [...]
+</span></span></code></pre></div><p>The result is:</p><table><thead><tr><th>col_name</th><th>data_type</th><th>comment</th></tr></thead><tbody><tr><td>i</td><td>int</td><td></td></tr><tr><td>j</td><td>int</td><td></td></tr><tr><td></td><td>NULL</td><td>NULL</td></tr><tr><td># Partition Transform Information</td><td>NULL</td><td>NULL</td></tr><tr><td># col_name</td><td>transform_type</td><td>NULL</td></tr><tr><td>j</td><td>IDENTITY</td><td>NULL</td></tr></tbody></table><p>You can create I [...]
+(supported only in Hive 4.0.0-alpha-1):</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> x (i int, ts <span style=color:#66d9ef>timestamp</span>) PARTITIONED <span style=color:#66d9ef>BY</span> SPEC (<span style=color:#66d9ef>month</span>(ts), bucket(<span style=col [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>DESCRIBE</span> x;
+</span></span></code></pre></div><p>The result is:</p><table><thead><tr><th>col_name</th><th>data_type</th><th>comment</th></tr></thead><tbody><tr><td>i</td><td>int</td><td></td></tr><tr><td>ts</td><td>timestamp</td><td></td></tr><tr><td></td><td>NULL</td><td>NULL</td></tr><tr><td># Partition Transform Information</td><td>NULL</td><td>NULL</td></tr><tr><td># col_name</td><td>transform_type</td><td>NULL</td></tr><tr><td>ts</td><td>MONTH</td><td>NULL</td></tr><tr><td>i</td><td>BUCKET[2]</t [...]
+The Iceberg table and the corresponding Hive table are created at the beginning of the query execution.
+The data is inserted / committed when the query finishes. So for a transient period the table already exists but contains no data.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> target PARTITIONED <span style=color:#66d9ef>BY</span> SPEC (<span style=color:#66d9ef [...]
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> <span style=color:#66d9ef>source</span>;
+</span></span></code></pre></div><h3 id=create-external-table-overlaying-an-existing-iceberg-table>CREATE EXTERNAL TABLE overlaying an existing Iceberg table</h3><p>The <code>CREATE EXTERNAL TABLE</code> command is used to overlay a Hive table &ldquo;on top of&rdquo; an existing Iceberg table. Iceberg
+tables are created using either a <a href=../../../javadoc/1.0.0/index.html?org/apache/iceberg/catalog/Catalog.html><code>Catalog</code></a>, or an implementation of the <a href=../../../javadoc/1.0.0/index.html?org/apache/iceberg/Tables.html><code>Tables</code></a> interface, and Hive needs to be configured accordingly to
+operate on these different types of table.</p><h4 id=hive-catalog-tables>Hive catalog tables</h4><p>As described before, tables created by the <code>HiveCatalog</code> with Hive engine feature enabled are directly visible to the
+Hive engine, so there is no need to create an overlay.</p><h4 id=custom-catalog-tables>Custom catalog tables</h4><p>For a table in a registered catalog, specify the catalog name in the statement using table property <code>iceberg.catalog</code>.
+For example, the SQL below creates an overlay for a table in a <code>hadoop</code> type catalog named <code>hadoop_cat</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SET</span>
+</span></span><span style=display:flex><span>iceberg.<span style=color:#66d9ef>catalog</span>.hadoop_cat.<span style=color:#66d9ef>type</span><span style=color:#f92672>=</span>hadoop;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span>
+</span></span><span style=display:flex><span>iceberg.<span style=color:#66d9ef>catalog</span>.hadoop_cat.warehouse<span style=color:#f92672>=</span>hdfs:<span style=color:#f92672>//</span>example.com:<span style=color:#ae81ff>8020</span><span style=color:#f92672>/</span>hadoop_cat;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>CREATE</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>EXTERNAL</span> <span style=color:#66d9ef>TABLE</span> database_a.table_a
+</span></span><span style=display:flex><span>STORED <span style=color:#66d9ef>BY</span> <span style=color:#e6db74>&#39;org.apache.iceberg.mr.hive.HiveIcebergStorageHandler&#39;</span>
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;iceberg.catalog&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hadoop_cat&#39;</span>);
+</span></span></code></pre></div><p>When <code>iceberg.catalog</code> is missing from both table properties and the global Hadoop configuration, <code>HiveCatalog</code> will be
+used as the default.</p><h4 id=path-based-hadoop-tables>Path-based Hadoop tables</h4><p>Iceberg tables created using <code>HadoopTables</code> are stored entirely in a directory in a filesystem like HDFS. These tables
+are considered to have no catalog. To indicate that, set <code>iceberg.catalog</code> property to <code>location_based_table</code>. For
+example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>EXTERNAL</span> <span style=color:#66d9ef>TABLE</span> table_a 
+</span></span><span style=display:flex><span>STORED <span style=color:#66d9ef>BY</span> <span style=color:#e6db74>&#39;org.apache.iceberg.mr.hive.HiveIcebergStorageHandler&#39;</span> 
+</span></span><span style=display:flex><span><span style=color:#66d9ef>LOCATION</span> <span style=color:#e6db74>&#39;hdfs://some_bucket/some_path/table_a&#39;</span>
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;iceberg.catalog&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;location_based_table&#39;</span>);
+</span></span></code></pre></div><h4 id=create-table-overlaying-an-existing-iceberg-table>CREATE TABLE overlaying an existing Iceberg table</h4><p>You can also create a new table that is managed by a custom catalog. For example, the following code creates a table in
+a custom Hadoop catalog:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SET</span>
+</span></span><span style=display:flex><span>iceberg.<span style=color:#66d9ef>catalog</span>.hadoop_cat.<span style=color:#66d9ef>type</span><span style=color:#f92672>=</span>hadoop;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span>
+</span></span><span style=display:flex><span>iceberg.<span style=color:#66d9ef>catalog</span>.hadoop_cat.warehouse<span style=color:#f92672>=</span>hdfs:<span style=color:#f92672>//</span>example.com:<span style=color:#ae81ff>8020</span><span style=color:#f92672>/</span>hadoop_cat;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> database_a.table_a
+</span></span><span style=display:flex><span>(
+</span></span><span style=display:flex><span>    id   bigint,
+</span></span><span style=display:flex><span>    name string
+</span></span><span style=display:flex><span>) PARTITIONED <span style=color:#66d9ef>BY</span> (
+</span></span><span style=display:flex><span>  dept string
+</span></span><span style=display:flex><span>) STORED <span style=color:#66d9ef>BY</span> <span style=color:#e6db74>&#39;org.apache.iceberg.mr.hive.HiveIcebergStorageHandler&#39;</span>
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;iceberg.catalog&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;hadoop_cat&#39;</span>);
+</span></span></code></pre></div><div class=danger>If the table to create already exists in the custom catalog, this will create a managed overlay
+table. This means technically you can omit the <code>EXTERNAL</code> keyword when creating an overlay table. However, this is <strong>not
+recommended</strong> because creating managed overlay tables could pose a risk to the shared data files in case of accidental
+drop table commands from the Hive side, which would unintentionally remove all the data in the table.</div><h3 id=alter-table>ALTER TABLE</h3><h4 id=table-properties>Table properties</h4><p>For HiveCatalog tables the Iceberg table properties and the Hive table properties stored in HMS are kept in sync.</p><div class=info>IMPORTANT: This feature is not available for other Catalog implementations.</div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-t [...]
+</span></span></code></pre></div><h4 id=schema-evolution>Schema evolution</h4><p>The Hive table schema is kept in sync with the Iceberg table. If an outside source (Impala/Spark/Java API/etc)
+changes the schema, the Hive table immediately reflects the changes. You alter the table schema using Hive commands:</p><ul><li>Add a column</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> orders <span style=color:#66d9ef>ADD</span> COLUMNS (nickname string);
+</span></span></code></pre></div><ul><li>Rename a column</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> orders CHANGE <span style=color:#66d9ef>COLUMN</span> item fruit string;
+</span></span></code></pre></div><ul><li>Reorder columns</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> orders CHANGE <span style=color:#66d9ef>COLUMN</span> quantity quantity int <span style=color:#66d9ef>AFTER</span> price;
+</span></span></code></pre></div><ul><li>Change a column type - only if Iceberg defines the column type change as safe</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> orders CHANGE <span style=color:#66d9ef>COLUMN</span> price price long;
+</span></span></code></pre></div><ul><li>Drop column by using REPLACE COLUMN to remove the old column</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> orders <span style=color:#66d9ef>REPLACE</span> COLUMNS (remaining string);
+</span></span></code></pre></div><div class=info>Note that dropping columns is the only thing REPLACE COLUMNS can be used for,
+i.e. if columns are specified out-of-order an error will be thrown signalling this limitation.</div><h4 id=partition-evolution>Partition evolution</h4><p>You change the partitioning schema using the following commands:</p><ul><li>Change the partitioning schema to new identity partitions:</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span>< [...]
+</span></span></code></pre></div><ul><li>Alternatively, provide a partition specification:</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> <span style=color:#66d9ef>order</span> <span style=color:#66d9ef>SET</span> PARTITION SPEC (<span style=color:#66d9ef>mon [...]
+</span></span></code></pre></div><h4 id=table-migration>Table migration</h4><p>You can migrate Avro / Parquet / ORC external tables to Iceberg tables using the following command:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> t <span style=color:#66d9ef>SET</span>  [...]
+</span></span></code></pre></div><p>During the migration the data files are not changed, only the appropriate Iceberg metadata files are created.
+After the migration, handle the table as a normal Iceberg table.</p><h3 id=truncate-table>TRUNCATE TABLE</h3><p>The following command truncates the Iceberg table:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>TRUNCATE</span> <span style=color:#66d9ef>TABLE</span> t;
+</span></span></code></pre></div><p>Using a partition specification is not allowed.</p><h3 id=drop-table>DROP TABLE</h3><p>Tables can be dropped using the <code>DROP TABLE</code> command:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>DROP</span> <span style=color:#66d9ef>TABLE</span> [<span style=color:#66d9ef>IF</ [...]
+</span></span></code></pre></div><h2 id=dml-commands>DML Commands</h2><h3 id=select>SELECT</h3><p>Select statements work the same way on Iceberg tables in Hive. You will see the Iceberg benefits over Hive in compilation and execution:</p><ul><li><strong>No file system listings</strong> - especially important on blob stores, like S3</li><li><strong>No partition listing from</strong> the Metastore</li><li><strong>Advanced partition filtering</strong> - the partition keys are not needed in the  [...]
+Also currently the statistics stored in the MetaStore are used for query planning. This is something we are planning to improve in the future.</p><h3 id=insert-into>INSERT INTO</h3><p>Hive supports the standard single-table INSERT INTO operation:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span sty [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>VALUES</span> (<span style=color:#e6db74>&#39;a&#39;</span>, <span style=color:#ae81ff>1</span>);
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> table_a
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span>...;
+</span></span></code></pre></div><p>Multi-table insert is also supported, but it will not be atomic. Commits occur one table at a time.
+Partial changes will be visible during the commit process and failures can leave partial changes committed.
+Changes within a single table will remain atomic.</p><p>Here is an example of inserting into multiple tables at once in Hive SQL:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>FROM</span> customers
+</span></span><span style=display:flex><span>   <span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> target1 <span style=color:#66d9ef>SELECT</span> customer_id, first_name
+</span></span><span style=display:flex><span>   <span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> target2 <span style=color:#66d9ef>SELECT</span> last_name, customer_id;
+</span></span></code></pre></div><h3 id=insert-overwrite>INSERT OVERWRITE</h3><p>INSERT OVERWRITE can replace data in the table with the result of a query. Overwrites are atomic operations for Iceberg tables.
+For nonpartitioned tables the content of the table is always removed. For partitioned tables the partitions
+that have rows produced by the SELECT query will be replaced.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> OVERWRITE <span style=color:#66d9ef>TABLE</span> target <span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> <span style=color:#66d9ef [...]
+</span></span></code></pre></div><h3 id=querying-metadata-tables>QUERYING METADATA TABLES</h3><p>Hive supports querying the Iceberg metadata tables. The tables can be used as normal
+Hive tables, so it is possible to use projections / joins / filters / etc.
+To reference a metadata table, use the full name of the table, like:
+&lt;DB_NAME>.&lt;TABLE_NAME>.&lt;METADATA_TABLE_NAME>.</p><p>Currently the following metadata tables are available in Hive:</p><ul><li>files</li><li>entries</li><li>snapshots</li><li>manifests</li><li>partitions</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <sp [...]
+</span></span></code></pre></div><h3 id=timetravel>TIMETRAVEL</h3><p>Hive supports snapshot id based and time based timetravel queries.
+For these views it is possible to use projections / joins / filters / etc.
+The function is available with the following syntax:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> table_a <span style=color:#66d9ef>FOR</span> SYSTEM_TIME <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> < [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> table_a <span style=color:#66d9ef>FOR</span> SYSTEM_VERSION <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>1234567</span>;
+</span></span></code></pre></div><h2 id=type-compatibility>Type compatibility</h2><p>Hive and Iceberg support different sets of types. Iceberg can perform type conversion automatically, but not for all
+combinations, so you may want to understand the type conversion in Iceberg prior to designing the types of columns in
+your tables. You can enable auto-conversion through Hadoop configuration (not enabled by default):</p><table><thead><tr><th>Config key</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>iceberg.mr.schema.auto.conversion</td><td>false</td><td>if Hive should perform type auto-conversion</td></tr></tbody></table><h3 id=hive-type-to-iceberg-type>Hive type to Iceberg type</h3><p>This type conversion table describes how Hive types are converted to the Iceberg types. The conver [...]
+creating Iceberg table and writing to Iceberg table via Hive.</p><table><thead><tr><th>Hive</th><th>Iceberg</th><th>Notes</th></tr></thead><tbody><tr><td>boolean</td><td>boolean</td><td></td></tr><tr><td>short</td><td>integer</td><td>auto-conversion</td></tr><tr><td>byte</td><td>integer</td><td>auto-conversion</td></tr><tr><td>integer</td><td>integer</td><td></td></tr><tr><td>long</td><td>long</td><td></td></tr><tr><td>float</td><td>float</td><td></td></tr><tr><td>double</td><td>double</ [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/index.html b/docs/1.1.0/index.html
index 42439d7e..f43ccf8c 100644
--- a/docs/1.1.0/index.html
+++ b/docs/1.1.0/index.html
@@ -1,9 +1,19 @@
-<!doctype html><html><head><meta name=generator content="Hugo 0.107.0"><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Apache Iceberg</title><link href=./css/bootstrap.css rel=stylesheet><link href=./css/markdown.css rel=stylesheet><link href=./css/katex.min.css rel=stylesheet><link href=./css/iceberg-theme.css rel=stylesheet><link href=. [...]
+<!doctype html><html><head><meta name=generator content="Hugo 0.107.0"><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Introduction</title><link href=./css/bootstrap.css rel=stylesheet><link href=./css/markdown.css rel=stylesheet><link href=./css/katex.min.css rel=stylesheet><link href=./css/iceberg-theme.css rel=stylesheet><link href=./f [...]
 <span class=sr-only>Toggle navigation</span>
 <span class=icon-bar></span>
 <span class=icon-bar></span>
 <span class=icon-bar></span></button>
-<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=./configuration/>Configuration</a></li><li><a href=./evolution/>Evolution</a></li><li><a href=./maintenance/>Maintenance</a></li><li><a href=./partitioning/>Partitioning</a></li><li><a href=./performance/>Performance</a></li><li><a href=./reliability/>Reliability</a></li><li><a href=./schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data- [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=./spark-ddl/>DDL</a></li><li><a href=./getting-started/>Getting Started</a></li><li><a href=./spark-procedures/>Procedures</a></li><li><a href=./spark-queries/>Queries</a></li><li><a href=./spark-structured-streaming/>Structured Streaming</a></li><li><a href=./spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#Flin [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=./flink/>Enabling Iceberg in Flink</a></li><li><a href=./flink-connector/>Flink Connector</a></li></ul></div><li><a href=./hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_bla [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=./aws/>AWS</a></li><li><a href=./dell/>Dell</a></li><li><a href=./jdbc/>JDBC</a></li><li><a href=./nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=./java-api-quickstart/>Java Quickstart</a></li><li><a href=./api/>Java API</a></li><li><a href=./custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=documentation>Documentation</h1><p><strong>Apache Iceberg  [...]
 <script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
 <script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
 <script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
diff --git a/docs/1.1.0/index.xml b/docs/1.1.0/index.xml
index c4eb2694..27874d8e 100644
--- a/docs/1.1.0/index.xml
+++ b/docs/1.1.0/index.xml
@@ -1 +1,48 @@
-<?xml version="1.0" encoding="utf-8" standalone="yes"?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Apache Iceberg</title><link>https://iceberg.apache.org/docs/1.1.0/</link><description>Recent content on Apache Iceberg</description><generator>Hugo -- gohugo.io</generator><language>en-us</language><atom:link href="https://iceberg.apache.org/docs/1.1.0/index.xml" rel="self" type="application/rss+xml"/></channel></rss>
\ No newline at end of file
+<?xml version="1.0" encoding="utf-8" standalone="yes"?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Introduction on Apache Iceberg</title><link>https://iceberg.apache.org/docs/1.1.0/</link><description>Recent content in Introduction on Apache Iceberg</description><generator>Hugo -- gohugo.io</generator><language>en-us</language><atom:link href="https://iceberg.apache.org/docs/1.1.0/index.xml" rel="self" type="application/rss+xml"/><item><title>Getting Start [...]
+Spark is currently the most feature-rich compute engine for Iceberg operations. We recommend getting started with Spark to understand Iceberg concepts and features through examples. You can also view documentation for using Iceberg with other compute engines under the Engines tab.
+Using Iceberg in Spark 3 To use Iceberg in a Spark shell, use the --packages option:
+spark-shell --packages org.</description></item><item><title>Hive</title><link>https://iceberg.apache.org/docs/1.1.0/hive/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/hive/</guid><description>Hive Iceberg supports reading and writing Iceberg tables through Hive by using a StorageHandler.
+Feature support Iceberg compatibility with Hive 2.x and Hive 3.1.2/3 supports the following features:
+Creating a table Dropping a table Reading a table Inserting into a table (INSERT INTO) DML operations work only with the MapReduce execution engine. With Hive version 4.0.0-alpha-1 and above, the Iceberg integration when using HiveCatalog supports the following additional features:
+Creating an Iceberg identity-partitioned table Creating an Iceberg table with any partition spec, including the various transforms supported by Iceberg Creating a table from an existing table (CTAS table) Altering a table while keeping Iceberg and Hive schemas in sync Altering the partition schema (updating columns) Altering the partition schema by specifying partition transforms Truncating a table Migrating tables in Avro, Parquet, or ORC (Non-ACID) format to Iceberg Reading the schema  [...]
+Enabling AWS Integration The iceberg-aws module is bundled with Spark and Flink engine runtimes for all versions from 0.11.0 onwards. However, the AWS clients are not bundled so that you can use the same client version as your application. You will need to provide the AWS v2 SDK because that is what Iceberg depends on.</description></item><item><title>Configuration</title><link>https://iceberg.apache.org/docs/1.1.0/configuration/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><g [...]
+Read properties Property Default Description read.split.target-size 134217728 (128 MB) Target size when combining data input splits read.split.metadata-target-size 33554432 (32 MB) Target size when combining metadata input splits read.split.planning-lookback 10 Number of bins to consider when combining input splits read.split.open-file-cost 4194304 (4 MB) The estimated cost to open a file, used as a minimum weight when combining splits.</description></item><item><title>Configuration</tit [...]
+This creates an Iceberg catalog named hive_prod that loads tables from a Hive metastore:
+spark.sql.catalog.hive_prod = org.apache.iceberg.spark.SparkCatalog spark.sql.catalog.hive_prod.type = hive spark.sql.catalog.hive_prod.uri = thrift://metastore-host:port # omit uri to use the same URI as Spark: hive.metastore.uris in hive-site.xml Iceberg also supports a directory-based catalog in HDFS that can be configured using type=hadoop:</description></item><item><title>DDL</title><link>https://iceberg.apache.org/docs/1.1.0/spark-ddl/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000 [...]
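+Continuing the configuration excerpt above, a minimal Java sketch of setting the same catalog properties programmatically when building the session (the catalog name and warehouse path are illustrative assumptions, not from the original page):
+import org.apache.spark.sql.SparkSession;
+SparkSession spark = SparkSession.builder()
+    .config("spark.sql.catalog.hadoop_prod", "org.apache.iceberg.spark.SparkCatalog")
+    .config("spark.sql.catalog.hadoop_prod.type", "hadoop")
+    .config("spark.sql.catalog.hadoop_prod.warehouse", "hdfs://nn:8020/warehouse/path")
+    .getOrCreate();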
+Iceberg uses Apache Spark&amp;rsquo;s DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions. Spark 2.4 does not support SQL DDL.
+Spark 2.4 can&amp;rsquo;t create Iceberg tables with DDL, instead use Spark 3 or the Iceberg API. CREATE TABLE Spark 3 can create tables in any Iceberg catalog with the clause USING iceberg:</description></item><item><title>Dell</title><link>https://iceberg.apache.org/docs/1.1.0/dell/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/dell/</guid><description>Iceberg Dell Integration Dell ECS Integration Iceberg can be used with Dell&amp;r [...]
+See Dell ECS for more information on Dell ECS.
+Parameters When using Dell ECS with Iceberg, these configuration parameters are required:
+Name Description ecs.s3.endpoint ECS S3 service endpoint ecs.s3.access-key-id ECS Username ecs.s3.secret-access-key S3 Secret Key warehouse The location of data and metadata The warehouse should use the following formats:</description></item><item><title>Enabling Iceberg in Flink</title><link>https://iceberg.apache.org/docs/1.1.0/flink/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/flink/</guid><description>Flink Apache Iceberg suppor [...]
+Feature support Flink Notes SQL create catalog ✔️ SQL create database ✔️ SQL create table ✔️ SQL create table like ✔️ SQL alter table ✔️ Only support altering table properties, column and partition changes are not supported SQL drop_table ✔️ SQL select ✔️ Support both streaming and batch mode SQL insert into ✔️ ️ Support both streaming and batch mode SQL insert overwrite ✔️ ️ DataStream read ✔️ ️ DataStream append ✔️ ️ DataStream overwrite ✔️ ️ Metadata tables ️ Support Java API but does [...]
+For example, Hive table partitioning cannot change, so moving from a daily partition layout to an hourly partition layout requires a new table. And because queries are dependent on partitions, queries must be rewritten for the new table.</description></item><item><title>Flink Connector</title><link>https://iceberg.apache.org/docs/1.1.0/flink-connector/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/flink-connector/</guid><description>Fl [...]
+In Flink, the SQL CREATE TABLE test (..) WITH ('connector'='iceberg', ...) will create a Flink table in the current Flink catalog (GenericInMemoryCatalog by default); this Flink table is only a mapping to the underlying Iceberg table, rather than a table maintained directly in the current Flink catalog.</description></item><item><title>Java API</title><link>https://iceberg.apache.org/docs/1.1.0/api/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/a [...]
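+Continuing the Flink Connector excerpt above, a hedged Java sketch of the same table mapping through the Flink Table API (the connection properties shown are illustrative assumptions):
+import org.apache.flink.table.api.EnvironmentSettings;
+import org.apache.flink.table.api.TableEnvironment;
+TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
+tEnv.executeSql(
+    "CREATE TABLE flink_table (id BIGINT, data STRING) WITH ("
+    + "'connector'='iceberg', 'catalog-name'='hive_prod', 'catalog-type'='hive', "
+    + "'uri'='thrift://metastore-host:9083', 'warehouse'='hdfs://nn:8020/warehouse')");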
+Table metadata and operations are accessed through the Table interface. This interface will return table information.
+Table metadata The Table interface provides access to the table metadata:
+schema returns the current table schema spec returns the current table partition spec properties returns a map of key-value properties currentSnapshot returns the current table snapshot snapshots returns all valid snapshots for the table snapshot(id) returns a specific snapshot by ID location returns the table&amp;rsquo;s base location Tables also provide refresh to update the table to the latest version, and expose helpers:</description></item><item><title>Java Custom Catalog</title><li [...]
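+Continuing the Java API excerpt above, a hedged sketch of reading this metadata (the catalog construction and identifiers are illustrative assumptions):
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+Table table = catalog.loadTable(TableIdentifier.of("db", "events"));
+table.schema();          // current schema
+table.spec();            // current partition spec
+table.currentSnapshot(); // current snapshot (null for an empty table)
+table.refresh();         // pick up the latest committed metadata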
+Custom TableOperations Custom Catalog Custom FileIO Custom LocationProvider Custom IcebergSource Custom table operations implementation Extend BaseMetastoreTableOperations to provide an implementation of how to read and write metadata
+Example:
+class CustomTableOperations extends BaseMetastoreTableOperations { private String dbName; private String tableName; private Configuration conf; private FileIO fileIO; protected CustomTableOperations(Configuration conf, String dbName, String tableName) { this.</description></item><item><title>Java Quickstart</title><link>https://iceberg.apache.org/docs/1.1.0/java-api-quickstart/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/java-api-qu [...]
+Using a Hive catalog The Hive catalog connects to a Hive metastore to keep track of Iceberg tables. You can initialize a Hive catalog with a name and some properties. (see: Catalog properties)
+Note: Currently, setConf is always required for hive catalogs, but this will change in the future.</description></item><item><title>JDBC</title><link>https://iceberg.apache.org/docs/1.1.0/jdbc/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/jdbc/</guid><description>Iceberg JDBC Integration JDBC Catalog Iceberg supports using a table in a relational database to manage Iceberg tables through JDBC. The database that JDBC connects to must  [...]
+Configurations Because each database and database service provider might require different configurations, the JDBC catalog allows arbitrary configurations through:
+Property Default Description uri the JDBC connection string jdbc.</description></item><item><title>Maintenance</title><link>https://iceberg.apache.org/docs/1.1.0/maintenance/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/maintenance/</guid><description>Maintenance Maintenance operations require a Table instance. Please refer to the Java API quickstart page to learn how to load an existing table. Recommended Maintenance Expire Snapshots Eac [...]
+Snapshots accumulate until they are expired by the expireSnapshots operation. Regularly expiring snapshots is recommended to delete data files that are no longer needed, and to keep the size of table metadata small.</description></item><item><title>Nessie</title><link>https://iceberg.apache.org/docs/1.1.0/nessie/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/nessie/</guid><description>Iceberg Nessie Integration Iceberg provides integr [...]
+multi-table transactions git-like operations (e.g. branches, tags, commits) hive-like metastore capabilities See Project Nessie for more information on Nessie. Nessie requires a server to run; see Getting Started to start a Nessie server.
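+A hedged Spark configuration sketch for pointing a catalog at such a server (the endpoint, branch, and warehouse values are illustrative assumptions):
+spark.sql.catalog.nessie = org.apache.iceberg.spark.SparkCatalog
+spark.sql.catalog.nessie.catalog-impl = org.apache.iceberg.nessie.NessieCatalog
+spark.sql.catalog.nessie.uri = http://localhost:19120/api/v1
+spark.sql.catalog.nessie.ref = main
+spark.sql.catalog.nessie.warehouse = s3://bucket/warehouse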
+Enabling Nessie Catalog The iceberg-nessie module is bundled with Spark and Flink runtimes for all versions from 0.</description></item><item><title>Partitioning</title><link>https://iceberg.apache.org/docs/1.1.0/partitioning/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/partitioning/</guid><description>Partitioning What is partitioning? Partitioning is a way to make queries faster by grouping similar rows together when writing.
+For example, queries for log entries from a logs table would usually include a time range, like this query for logs between 10 and 12 AM:
+SELECT level, message FROM logs WHERE event_time BETWEEN &amp;#39;2018-12-01 10:00:00&amp;#39; AND &amp;#39;2018-12-01 12:00:00&amp;#39; Configuring the logs table to partition by the date of event_time will group log events into files with the same event date.</description></item><item><title>Performance</title><link>https://iceberg.apache.org/docs/1.1.0/performance/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/performance/</guid><d [...]
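+Continuing the Partitioning excerpt above, a hedged Java sketch of the corresponding partition spec (assuming a schema that contains the event_time timestamp column):
+import org.apache.iceberg.PartitionSpec;
+PartitionSpec spec = PartitionSpec.builderFor(schema)
+    .day("event_time")
+    .build();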
+Planning in an Iceberg table fits on a single node because Iceberg&amp;rsquo;s metadata can be used to prune metadata files that aren&amp;rsquo;t needed, in addition to filtering data files that don&amp;rsquo;t contain matching data.</description></item><item><title>Procedures</title><link>https://iceberg.apache.org/docs/1.1.0/spark-procedures/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/spark-procedures/</guid><description>Spark Pr [...]
+Usage Procedures can be used from any configured Iceberg catalog with CALL. All procedures are in the namespace system.
+CALL supports passing arguments by name (recommended) or by position. Mixing position and named arguments is not supported.
+Named arguments All procedure arguments are named. When passing arguments by name, arguments can be in any order and any optional argument can be omitted.</description></item><item><title>Queries</title><link>https://iceberg.apache.org/docs/1.1.0/spark-queries/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/spark-queries/</guid><description>Spark Queries To use Iceberg in Spark, first configure Spark catalogs.
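+Returning to the Procedures excerpt, a hedged example of a named-argument CALL issued from Java (the catalog, table, and snapshot id are illustrative assumptions):
+spark.sql("CALL my_catalog.system.rollback_to_snapshot(table => 'db.sample', snapshot_id => 1)");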
+Iceberg uses Apache Spark&amp;rsquo;s DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
+Feature support Spark 3 Spark 2.4 Notes SELECT ✔️ DataFrame reads ✔️ ✔️ Metadata table SELECT ✔️ History metadata table ✔️ ✔️ Snapshots metadata table ✔️ ✔️ Files metadata table ✔️ ✔️ Manifests metadata table ✔️ ✔️ Partitions metadata table ✔️ ✔️ All metadata tables ✔️ ✔️ Querying with SQL In Spark 3, tables use identifiers that include a catalog name.</description></item><item><title>Reliability</title><link>https://iceberg.apache.org/docs/1.1.0/reliability/</link><pubDate>Mon, 01 Jan 0 [...]
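+Continuing the Queries excerpt above, a hedged example of a catalog-qualified read from Java (identifiers are illustrative assumptions):
+Dataset<Row> df = spark.table("my_catalog.db.events");
+// equivalently, through the DataFrame reader:
+Dataset<Row> df2 = spark.read().format("iceberg").load("my_catalog.db.events");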
+Hive tables track data files using both a central metastore for partitions and a file system for individual files. This makes atomic changes to a table&amp;rsquo;s contents impossible, and eventually consistent stores like S3 may return incorrect results due to the use of listing files to reconstruct the state of a table. It also requires job planning to make many slow listing calls: O(n) with the number of partitions.</description></item><item><title>Schemas</title><link>https://iceberg [...]
+Type Description Notes boolean True or false int 32-bit signed integers Can promote to long long 64-bit signed integers float 32-bit IEEE 754 floating point Can promote to double double 64-bit IEEE 754 floating point decimal(P,S) Fixed-point decimal; precision P, scale S Scale is fixed and precision must be 38 or less date Calendar date without timezone or time time Time of day without date, timezone Stored as microseconds timestamp Timestamp without timezone Stored as microseconds times [...]
+As of Spark 3, DataFrame reads and writes are supported.
+Feature support Spark 3 Spark 2.4 Notes DataFrame write ✔ ✔ Streaming Reads Iceberg supports processing incremental data in Spark Structured Streaming jobs that start from a historical timestamp (a fuller sketch follows the excerpt below):
+val df = spark.</description></item><item><title>Writes</title><link>https://iceberg.apache.org/docs/1.1.0/spark-writes/</link><pubDate>Mon, 01 Jan 0001 00:00:00 +0000</pubDate><guid>https://iceberg.apache.org/docs/1.1.0/spark-writes/</guid><description>Spark Writes To use Iceberg in Spark, first configure Spark catalogs.
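+A hedged Java sketch of such a streaming read (the table name and start timestamp are illustrative assumptions; stream-from-timestamp takes milliseconds since the epoch):
+Dataset<Row> stream = spark.readStream()
+    .format("iceberg")
+    .option("stream-from-timestamp", Long.toString(streamStartTimestamp))
+    .load("my_catalog.db.events");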
+Some plans are only available when using Iceberg SQL extensions in Spark 3.
+Iceberg uses Apache Spark&amp;rsquo;s DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
+Feature support Spark 3 Spark 2.4 Notes SQL insert into ✔️ SQL merge into ✔️ ⚠ Requires Iceberg Spark extensions SQL insert overwrite ✔️ SQL delete from ✔️ ⚠ Row-level delete requires Spark extensions SQL update ✔️ ⚠ Requires Iceberg Spark extensions DataFrame append ✔️ ✔️ DataFrame overwrite ✔️ ✔️ ⚠ Behavior changed in Spark 3 DataFrame CTAS and RTAS ✔️ Writing with SQL Spark 3 supports SQL INSERT INTO, MERGE INTO, and INSERT OVERWRITE, as well as the new DataFrameWriterV2 API.</descrip [...]
\ No newline at end of file
diff --git a/docs/1.1.0/java-api-quickstart/index.html b/docs/1.1.0/java-api-quickstart/index.html
new file mode 100644
index 00000000..27f9c076
--- /dev/null
+++ b/docs/1.1.0/java-api-quickstart/index.html
@@ -0,0 +1,88 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Java Quickstart</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.m [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class="collapse in"><ul class=sub-menu><li><a id=active href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=java-api-quickstart>Java API Quickstart< [...]
+You can initialize a Hive catalog with a name and some properties.
+(see: <a href=../configuration/#catalog-properties>Catalog properties</a>)</p><p><strong>Note:</strong> Currently, <code>setConf</code> is always required for hive catalogs, but this will change in the future.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.hive.HiveCatalog<span sty [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>HiveCatalog catalog <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HiveCatalog<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>setConf</span><span style=color:#f92672>(</span>spark<span style=color:#f92672>.</span><span style=color:#a6e22e>sparkContext</span><span style=color:#f92672>().</span><span style=color:#a6e22e>hadoopConfiguration</span><span style=color:#f92672>());</span>  <span style=color:#75715e>// Configure using Spark&#39;s Hadoop configuration
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>
+</span></span><span style=display:flex><span>Map <span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> properties <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HashMap<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;();</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;warehouse&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;...&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;uri&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;...&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>initialize</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hive&#34;</span><span style=color:#f92672>,</span> properties<span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>The <code>Catalog</code> interface defines methods for working with tables, like <code>createTable</code>, <code>loadTable</code>, <code>renameTable</code>, and <code>dropTable</code>. <code>HiveCatalog</code> implements the <code>Catalog</code> interface.</p><p>To create a table, pass an <code>Identifier</code> and a <code>Schema</code> along with other initial metadata:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272 [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.catalog.TableIdentifier<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>TableIdentifier name <span style=color:#f92672>=</span> TableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;logging&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;logs&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Table table <span style=color:#f92672>=</span> catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>createTable</span><span style=color:#f92672>(</span>name<span style=color:#f92672>,</span> schema<span style=color:#f92672>,</span> spec<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// or to load an existing table, use the following line
+</span></span></span><span style=display:flex><span><span style=color:#75715e>// Table table = catalog.loadTable(name);
+</span></span></span></code></pre></div><p>The logs <a href=#create-a-schema>schema</a> and <a href=#create-a-partition-spec>partition spec</a> are created below.</p><h3 id=using-a-hadoop-catalog>Using a Hadoop catalog</h3><p>A Hadoop catalog doesn&rsquo;t need to connect to a Hive MetaStore, but can only be used with HDFS or similar file systems that support atomic rename. Concurrent writes with a Hadoop catalog are not safe with a local FS or S3. To create a Hadoop catalog:</p><div cla [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.hadoop.HadoopCatalog<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Configuration conf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>String warehousePath <span style=color:#f92672>=</span> <span style=color:#e6db74>&#34;hdfs://host:8020/warehouse_path&#34;</span><span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>HadoopCatalog catalog <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HadoopCatalog<span style=color:#f92672>(</span>conf<span style=color:#f92672>,</span> warehousePath<span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>Like the Hive catalog, <code>HadoopCatalog</code> implements <code>Catalog</code>, so it also has methods for working with tables, like <code>createTable</code>, <code>loadTable</code>, and <code>dropTable</code>.</p><p>This example creates a table with a Hadoop catalog:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.catalog.TableIdentifier<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>TableIdentifier name <span style=color:#f92672>=</span> TableIdentifier<span style=color:#f92672>.</span><span style=color:#a6e22e>of</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;logging&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;logs&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Table table <span style=color:#f92672>=</span> catalog<span style=color:#f92672>.</span><span style=color:#a6e22e>createTable</span><span style=color:#f92672>(</span>name<span style=color:#f92672>,</span> schema<span style=color:#f92672>,</span> spec<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// or to load an existing table, use the following line
+</span></span></span><span style=display:flex><span><span style=color:#75715e>// Table table = catalog.loadTable(name);
+</span></span></span></code></pre></div><p>The logs <a href=#create-a-schema>schema</a> and <a href=#create-a-partition-spec>partition spec</a> are created below.</p><h3 id=using-hadoop-tables>Using Hadoop tables</h3><p>Iceberg also supports tables that are stored in a directory in HDFS. Concurrent writes with Hadoop tables are not safe when stored in the local FS or S3. Directory tables don&rsquo;t support all catalog operations, like rename, so they use the <code>Tables</code> interf [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.hadoop.HadoopTables<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.Table<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Configuration conf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span>
+</span></span><span style=display:flex><span>HadoopTables tables <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HadoopTables<span style=color:#f92672>(</span>conf<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Table table <span style=color:#f92672>=</span> tables<span style=color:#f92672>.</span><span style=color:#a6e22e>create</span><span style=color:#f92672>(</span>schema<span style=color:#f92672>,</span> spec<span style=color:#f92672>,</span> table_location<span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// or to load an existing table, use the following line
+</span></span></span><span style=display:flex><span><span style=color:#75715e>// Table table = tables.load(table_location);
+</span></span></span></code></pre></div><div class=danger>Hadoop tables shouldn&rsquo;t be used with file systems that do not support atomic rename. Iceberg relies on rename to synchronize concurrent commits for directory tables.</div><h3 id=tables-in-spark>Tables in Spark</h3><p>Spark uses both <code>HiveCatalog</code> and <code>HadoopTables</code> to load tables. Hive is used when the identifier passed to <code>load</code> or <code>save</code> is not a path, otherwise Spark assumes it  [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.types.Types<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Schema schema <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Schema<span style=color:#f92672>(</span>
+</span></span><span style=display:flex><span>      Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>required</span><span style=color:#f92672>(</span><span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;level&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>StringType</span><span style=col [...]
+</span></span><span style=display:flex><span>      Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>required</span><span style=color:#f92672>(</span><span style=color:#ae81ff>2</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;event_time&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>TimestampType</span><span s [...]
+</span></span><span style=display:flex><span>      Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>required</span><span style=color:#f92672>(</span><span style=color:#ae81ff>3</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;message&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>StringType</span><span style=c [...]
+</span></span><span style=display:flex><span>      Types<span style=color:#f92672>.</span><span style=color:#a6e22e>NestedField</span><span style=color:#f92672>.</span><span style=color:#a6e22e>optional</span><span style=color:#f92672>(</span><span style=color:#ae81ff>4</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;call_stack&#34;</span><span style=color:#f92672>,</span> Types<span style=color:#f92672>.</span><span style=color:#a6e22e>ListType</span><span style= [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>);</span>
+</span></span></code></pre></div><p>When using the Iceberg API directly, type IDs are required. Conversions from other schema formats, like Spark, Avro, and Parquet will automatically assign new IDs.</p><p>When a table is created, all IDs in the schema are re-assigned to ensure uniqueness.</p><h3 id=convert-a-schema-from-avro>Convert a schema from Avro</h3><p>To create an Iceberg schema from an existing Avro schema, use converters in <code>AvroSchemaUtil</code>:</p><div class=highlight>< [...]
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.avro.Schema.Parser<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.avro.AvroSchemaUtil<span style=color:#f92672>;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Schema avroSchema <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Parser<span style=color:#f92672>().</span><span style=color:#a6e22e>parse</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;{\&#34;type\&#34;: \&#34;record\&#34; , ... }&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Schema icebergSchema <span style=color:#f92672>=</span> AvroSchemaUtil<span style=color:#f92672>.</span><span style=color:#a6e22e>toIceberg</span><span style=color:#f92672>(</span>avroSchema<span style=color:#f92672>);</span>
+</span></span></code></pre></div><h3 id=convert-a-schema-from-spark>Convert a schema from Spark</h3><p>To create an Iceberg schema from an existing table, use converters in <code>SparkSchemaUtil</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span><span style=color:#f92672>import</span> org.apache.iceberg.spark.SparkSchemaUtil<span style [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>Schema schema <span style=color:#f92672>=</span> SparkSchemaUtil<span style=color:#f92672>.</span><span style=color:#a6e22e>schemaForTable</span><span style=color:#f92672>(</span>sparkSession<span style=color:#f92672>,</span> table_name<span style=color:#f92672>);</span>
+</span></span></code></pre></div><h2 id=partitioning>Partitioning</h2><h3 id=create-a-partition-spec>Create a partition spec</h3><p>Partition specs describe how Iceberg should group records into data files. Partition specs are created for a table&rsquo;s schema using a builder.</p><p>This example creates a partition spec for the <code>logs</code> table that partitions records by the hour of the log event&rsquo;s timestamp and by log level:</p><div class=highlight><pre tabindex=0 style=co [...]
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>PartitionSpec spec <span style=color:#f92672>=</span> PartitionSpec<span style=color:#f92672>.</span><span style=color:#a6e22e>builderFor</span><span style=color:#f92672>(</span>schema<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>      <span style=color:#f92672>.</span><span style=color:#a6e22e>hour</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;event_time&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>      <span style=color:#f92672>.</span><span style=color:#a6e22e>identity</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;level&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>      <span style=color:#f92672>.</span><span style=color:#a6e22e>build</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>For more information on the different partition transforms that Iceberg offers, visit <a href=../../../spec#partitioning>this page</a>.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#create-a-table>Create a table</a><ul><li><a href=#using-a-hive-catalog>Using a Hive catalog</a></li><li><a href=#using-a-hadoop-catalog>Using a Hadoop catalog</a></li><li><a href=#using-hadoop-tables>Using Hadoop tables</a></ [...]
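+As a further hedged sketch using the same logs schema (the transform choices here are illustrative and not part of the original page), other transforms hang off the same builder:
+PartitionSpec spec = PartitionSpec.builderFor(schema)
+    .day("event_time")       // one partition per event date
+    .truncate("message", 10) // partition by a 10-character string prefix
+    .build();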
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/java/api/index.html b/docs/1.1.0/java/api/index.html
new file mode 100644
index 00000000..ab00ac1a
--- /dev/null
+++ b/docs/1.1.0/java/api/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/api/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/api/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/api/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/java/custom-catalog/index.html b/docs/1.1.0/java/custom-catalog/index.html
new file mode 100644
index 00000000..89b7b041
--- /dev/null
+++ b/docs/1.1.0/java/custom-catalog/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/custom-catalog/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/custom-catalog/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/custom-catalog/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/java/quickstart/index.html b/docs/1.1.0/java/quickstart/index.html
new file mode 100644
index 00000000..c56e20c5
--- /dev/null
+++ b/docs/1.1.0/java/quickstart/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/java-api-quickstart/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/java-api-quickstart/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/java-api-quickstart/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/jdbc/index.html b/docs/1.1.0/jdbc/index.html
new file mode 100644
index 00000000..7dddf61a
--- /dev/null
+++ b/docs/1.1.0/jdbc/index.html
@@ -0,0 +1,41 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>JDBC</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css rel= [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class="collapse in"><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a id=active href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=iceberg-jdbc-integration>Iceberg JDBC Integration</h1>< [...]
+The database that JDBC connects to must support atomic transactions to allow the JDBC catalog implementation to
+properly support atomic Iceberg table commits and read serializable isolation.</p><h3 id=configurations>Configurations</h3><p>Because each database and database service provider might require different configurations,
+the JDBC catalog allows arbitrary configurations through:</p><table><thead><tr><th>Property</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>uri</td><td></td><td>the JDBC connection string</td></tr><tr><td>jdbc.&lt;property_key></td><td></td><td>any key value pairs to configure the JDBC connection</td></tr></tbody></table><h3 id=examples>Examples</h3><h4 id=spark>Spark</h4><p>You can start a Spark session with a MySQL JDBC connection using the following configurations: [...]
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog<span style=color:#f92672>=</span>org.apache.iceberg.spark.SparkCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.warehouse<span style=color:#f92672>=</span>s3://my-bucket/my/key/prefix <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.catalog-impl<span style=color:#f92672>=</span>org.apache.iceberg.jdbc.JdbcCatalog <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.uri<span style=color:#f92672>=</span>jdbc:mysql://test.1234567890.us-west-2.rds.amazonaws.com:3306/default <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.jdbc.verifyServerCertificate<span style=color:#f92672>=</span>true <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.jdbc.useSSL<span style=color:#f92672>=</span>true <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.jdbc.user<span style=color:#f92672>=</span>admin <span style=color:#ae81ff>\
+</span></span></span><span style=display:flex><span><span style=color:#ae81ff></span>    --conf spark.sql.catalog.my_catalog.jdbc.password<span style=color:#f92672>=</span>pass
+</span></span></code></pre></div><h4 id=java-api>Java API</h4><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>Class<span style=color:#f92672>.</span><span style=color:#a6e22e>forName</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;com.mysql.cj.jdbc.Driver&#34;</span><span style=color:#f92672>);</span> <span style=color:# [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>Map<span style=color:#f92672>&lt;</span>String<span style=color:#f92672>,</span> String<span style=color:#f92672>&gt;</span> properties <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> HashMap<span style=color:#f92672>&lt;&gt;();</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>CatalogProperties<span style=color:#f92672>.</span><span style=color:#a6e22e>CATALOG_IMPL</span><span style=color:#f92672>,</span> JdbcCatalog<span style=color:#f92672>.</span><span style=color:#a6e22e>class</span><span style=color:#f92672>.</span><span style=color:#a6e22e>getName</span><span style=color:#f92672>());</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>CatalogProperties<span style=color:#f92672>.</span><span style=color:#a6e22e>URI</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;jdbc:mysql://localhost:3306/test&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>JdbcCatalog<span style=color:#f92672>.</span><span style=color:#a6e22e>PROPERTY_PREFIX</span> <span style=color:#f92672>+</span> <span style=color:#e6db74>&#34;user&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;admin&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>JdbcCatalog<span style=color:#f92672>.</span><span style=color:#a6e22e>PROPERTY_PREFIX</span> <span style=color:#f92672>+</span> <span style=color:#e6db74>&#34;password&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;pass&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>properties<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span>CatalogProperties<span style=color:#f92672>.</span><span style=color:#a6e22e>WAREHOUSE_LOCATION</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;s3://warehouse/path&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Configuration hadoopConf <span style=color:#f92672>=</span> <span style=color:#66d9ef>new</span> Configuration<span style=color:#f92672>();</span> <span style=color:#75715e>// configs if you use HadoopFileIO
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>JdbcCatalog catalog <span style=color:#f92672>=</span> CatalogUtil<span style=color:#f92672>.</span><span style=color:#a6e22e>buildIcebergCatalog</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;test_jdbc_catalog&#34;</span><span style=color:#f92672>,</span> properties<span style=color:#f92672>,</span> hadoopConf<span style=color:#f92672>);</span>
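+// Hedged usage sketch; the namespace and table names below are illustrative:
+// catalog.createNamespace(Namespace.of("db"));
+// Table table = catalog.loadTable(TableIdentifier.of("db", "events"));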
+</span></span></code></pre></div></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#jdbc-catalog>JDBC Catalog</a><ul><li><a href=#configurations>Configurations</a></li><li><a href=#examples>Examples</a></li></ul></li></ul></nav></div></div></div></div></section></body><script src=https://iceberg.apache.org/docs/1.1.0//js/jquery-1.11.0.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/maintenance/index.html b/docs/1.1.0/maintenance/index.html
new file mode 100644
index 00000000..7b8be2fa
--- /dev/null
+++ b/docs/1.1.0/maintenance/index.html
@@ -0,0 +1,55 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Maintenance</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.c [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a id=active href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=maintenance>Maintenance</h1><div class=info>Maintenance [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>long</span> tsToExpire <span style=color:#f92672>=</span> System<span style=color:#f92672>.</span><span style=color:#a6e22e>currentTimeMillis</span><span style=color:#f92672>()</span> <span style=color:#f92672>-</span> <span style=color:#f92672>(</span><span style=color:#ae81ff>1000</span> <span style=color:#f92672>*</span> <span style=color:#ae81ff>60</span> <span style=color:#f92672>*</span> <span style=color:#ae81f [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>table<span style=color:#f92672>.</span><span style=color:#a6e22e>expireSnapshots</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>expireOlderThan</span><span style=color:#f92672>(</span>tsToExpire<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>     <span style=color:#f92672>.</span><span style=color:#a6e22e>commit</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>See the <a href=../../../javadoc/1.0.0/org/apache/iceberg/ExpireSnapshots.html><code>ExpireSnapshots</code> Javadoc</a> for more configuration options.</p><p>There is also a Spark action that can run table expiration in parallel for large tables:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>Table ta [...]
+</span></span><span style=display:flex><span>SparkActions
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>expireSnapshots</span><span style=color:#f92672>(</span>table<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>expireOlderThan</span><span style=color:#f92672>(</span>tsToExpire<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>Expiring old snapshots removes them from metadata, so they are no longer available for time travel queries.</p><div class=info>Data files are not deleted until they are no longer referenced by a snapshot that may be used for time travel or rollback.
+Regularly expiring snapshots deletes unused data files.</div><h3 id=remove-old-metadata-files>Remove old metadata files</h3><p>Iceberg keeps track of table metadata using JSON files. Each change to a table produces a new metadata file to provide atomicity.</p><p>Old metadata files are kept for history by default. Tables with frequent commits, like those written by streaming jobs, may need to regularly clean metadata files.</p><p>To automatically clean metadata files, set <code>write.meta [...]
+</span></span><span style=display:flex><span>SparkActions
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>deleteOrphanFiles</span><span style=color:#f92672>(</span>table<span style=color:#f92672>)</span>
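+    // A retention threshold can also be set explicitly before execute() (hedged sketch;
+    // olderThan takes a timestamp in milliseconds, e.g. 3 days ago to match the default):
+    // .olderThan(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3))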
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>See the <a href=../../../javadoc/1.0.0/org/apache/iceberg/actions/DeleteOrphanFiles.html>DeleteOrphanFiles Javadoc</a> for more configuration options.</p><p>This action may take a long time to finish if you have lots of files in data and metadata directories. It is recommended to run this action periodically, but frequent runs are usually unnecessary.</p><div class=info>It is dangerous to remove orphan files with a retention interval shorter than the  [...]
+might corrupt the table if in-progress files are considered orphaned and are deleted. The default interval is 3 days.</div><div class=info>Iceberg uses the string representations of paths when determining which files need to be removed. On some file systems,
+the path can change over time, but it still represents the same file. For example, if you change authorities for an HDFS cluster,
+none of the old path URLs used during creation will match those that appear in a current listing. <em>This will lead to data loss when
+RemoveOrphanFiles is run</em>. Please make sure the entries in your metadata tables match those listed by the Hadoop
+FileSystem API to avoid unintentional deletion.</div><h2 id=optional-maintenance>Optional Maintenance</h2><p>Some tables require additional maintenance. For example, streaming queries may produce small data files that should be <a href=#compact-data-files>compacted into larger files</a>. Some tables can also benefit from <a href=#rewrite-manifests>rewriting manifest files</a> to make locating data for queries much faster.</p><h3 id=compact-data-files>Compact data files</h3><p>Iceberg trac [...]
+</span></span><span style=display:flex><span>SparkActions
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>rewriteDataFiles</span><span style=color:#f92672>(</span>table<span style=color:#f92672>)</span>
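+</span></span><span style=display:flex><span>    <span style=color:#75715e>// limit the rewrite to a single partition and target ~500 MB output files</span>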
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>filter</span><span style=color:#f92672>(</span>Expressions<span style=color:#f92672>.</span><span style=color:#a6e22e>equal</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;date&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;2020-08-18&#34;</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>option</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;target-file-size-bytes&#34;</span><span style=color:#f92672>,</span> Long<span style=color:#f92672>.</span><span style=color:#a6e22e>toString</span><span style=color:#f92672>(</span><span style=color:#ae81ff>500</span> <span style=color:#f92672>*</span> <span style=color:#ae81ff>1024</span> <span style=c [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>The <code>files</code> metadata table is useful for inspecting data file sizes and determining when to compact partitions.</p><p>See the <a href=../../../javadoc/1.0.0/org/apache/iceberg/actions/RewriteDataFiles.html><code>RewriteDataFiles</code> Javadoc</a> for more configuration options.</p><h3 id=rewrite-manifests>Rewrite manifests</h3><p>Iceberg uses metadata in its manifest list and manifest files to speed up query planning and to prune unnecessar [...]
+</span></span><span style=display:flex><span>SparkActions
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>get</span><span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>rewriteManifests</span><span style=color:#f92672>(</span>table<span style=color:#f92672>)</span>
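+</span></span><span style=display:flex><span>    <span style=color:#75715e>// only rewrite manifests smaller than 10 MB</span>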
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>rewriteIf</span><span style=color:#f92672>(</span>file <span style=color:#f92672>-&gt;</span> file<span style=color:#f92672>.</span><span style=color:#a6e22e>length</span><span style=color:#f92672>()</span> <span style=color:#f92672>&lt;</span> <span style=color:#ae81ff>10</span> <span style=color:#f92672>*</span> <span style=color:#ae81ff>1024</span> <span style=color:#f92672>*</ [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>    <span style=color:#f92672>.</span><span style=color:#a6e22e>execute</span><span style=color:#f92672>();</span>
+</span></span></code></pre></div><p>See the <a href=../../../javadoc/1.0.0/org/apache/iceberg/actions/RewriteManifests.html><code>RewriteManifests</code> Javadoc</a> for more configuration options.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#recommended-maintenance>Recommended Maintenance</a><ul><li><a href=#expire-snapshots>Expire Snapshots</a></li><li><a href=#remove-old-metadata-files>Remove old metadata files</a></li><li><a href=# [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/nessie/index.html b/docs/1.1.0/nessie/index.html
new file mode 100644
index 00000000..5c2e718e
--- /dev/null
+++ b/docs/1.1.0/nessie/index.html
@@ -0,0 +1,82 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Nessie</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css re [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class="collapse in"><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a id=active href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=iceberg-nessie-integration>Iceberg Nessie Integration</ [...]
+This section describes how to use Iceberg with Nessie. Nessie provides several key features on top of Iceberg:</p><ul><li>multi-table transactions</li><li>Git-like operations (e.g. branches, tags, commits)</li><li>Hive-like metastore capabilities</li></ul><p>See <a href=https://projectnessie.org>Project Nessie</a> for more information on Nessie. Nessie requires a server to run; see
+<a href=https://projectnessie.org/try/>Getting Started</a> to start a Nessie server.</p><h2 id=enabling-nessie-catalog>Enabling Nessie Catalog</h2><p>The <code>iceberg-nessie</code> module is bundled with Spark and Flink runtimes for all versions from <code>0.11.0</code>. To get started
+with Nessie (with Spark 3.3) and Iceberg, simply add the Iceberg runtime to your process, e.g. <code>spark-sql --packages org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:1.0.0</code>.</p><h2 id=spark-sql-extensions>Spark SQL Extensions</h2><p>From Spark 3.3 (with Scala 2.12), Nessie SQL extensions can be used to manage the Nessie repo as shown below.</p><pre tabindex=0><code>bin/spark-sql 
+  --packages &#34;org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:1.0.0,org.projectnessie:nessie-spark-extensions:0.20.0&#34;
+  --conf spark.sql.extensions=&#34;org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions&#34;
+  --conf &lt;other settings&gt;
+</code></pre><p>Please refer to the <a href=https://projectnessie.org/tools/sql/>Nessie SQL extensions documentation</a> to learn more.</p><h2 id=nessie-catalog>Nessie Catalog</h2><p>One major feature introduced in release <code>0.11.0</code> is the ability to easily interact with a <a href=../custom-catalog>Custom
+Catalog</a> from Spark and Flink. See <a href=../spark-configuration#catalog-configuration>Spark Configuration</a>
+and <a href=../flink#custom-catalog>Flink Configuration</a> for instructions on adding a custom catalog to Iceberg.</p><p>To use the Nessie Catalog, the following properties are required:</p><ul><li><code>warehouse</code>. Like most other catalogs, the warehouse property is a file path where this catalog should store tables.</li><li><code>uri</code>. This is the Nessie server base URI, e.g. <code>http://localhost:19120/api/v1</code>.</li><li><code>ref</code> (optional). This is the Nessi [...]
+</span></span><span style=display:flex><span>options<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;warehouse&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;/path/to/warehouse&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>options<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;ref&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;main&#34;</span><span style=color:#f92672>);</span>
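+</span></span><span style=display:flex><span><span style=color:#75715e>// main is the default Nessie branch; set ref to use a different branch or tag</span>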
+</span></span><span style=display:flex><span>options<span style=color:#f92672>.</span><span style=color:#a6e22e>put</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;uri&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;https://localhost:19120/api/v1&#34;</span><span style=color:#f92672>);</span>
+</span></span><span style=display:flex><span>Catalog nessieCatalog <span style=color:#f92672>=</span> CatalogUtil<span style=color:#f92672>.</span><span style=color:#a6e22e>loadCatalog</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;org.apache.iceberg.nessie.NessieCatalog&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;nessie&#34;</span><span style=color:#f92672>,</span> options<span style=color:#f92672>,</span> hadoopConfig<span style= [...]
+</span></span></code></pre></div><p>and in Spark:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-java data-lang=java><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.catalog.nessie.warehouse&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db7 [...]
+</span></span><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.catalog.nessie.uri&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;http://localhost:19120/api/v1&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.catalog.nessie.ref&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;main&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.catalog.nessie.catalog-impl&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;org.apache.iceberg.nessie.NessieCatalog&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.catalog.nessie&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;org.apache.iceberg.spark.SparkCatalog&#34;</span><span style=color:#f92672>)</span>
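+</span></span><span style=display:flex><span><span style=color:#75715e>// registers a Spark catalog named nessie backed by the Nessie catalog implementation</span>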
+</span></span><span style=display:flex><span>conf<span style=color:#f92672>.</span><span style=color:#a6e22e>set</span><span style=color:#f92672>(</span><span style=color:#e6db74>&#34;spark.sql.extensions&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><p>This is how it looks in Flink via the Python API (additional details can be found <a href=../flink#preparation-when-using-flinks-python-api>here</a>):</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-python data-lang=python><span style=display:flex><span><span style=color:#f92672>import</span> os
+</span></span><span style=display:flex><span><span style=color:#f92672>from</span> pyflink.datastream <span style=color:#f92672>import</span> StreamExecutionEnvironment
+</span></span><span style=display:flex><span><span style=color:#f92672>from</span> pyflink.table <span style=color:#f92672>import</span> StreamTableEnvironment
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>env <span style=color:#f92672>=</span> StreamExecutionEnvironment<span style=color:#f92672>.</span>get_execution_environment()
+</span></span><span style=display:flex><span>iceberg_flink_runtime_jar <span style=color:#f92672>=</span> os<span style=color:#f92672>.</span>path<span style=color:#f92672>.</span>join(os<span style=color:#f92672>.</span>getcwd(), <span style=color:#e6db74>&#34;iceberg-flink-runtime-1.0.0.jar&#34;</span>)
+</span></span><span style=display:flex><span>env<span style=color:#f92672>.</span>add_jars(<span style=color:#e6db74>&#34;file://</span><span style=color:#e6db74>{}</span><span style=color:#e6db74>&#34;</span><span style=color:#f92672>.</span>format(iceberg_flink_runtime_jar))
+</span></span><span style=display:flex><span>table_env <span style=color:#f92672>=</span> StreamTableEnvironment<span style=color:#f92672>.</span>create(env)
+</span></span><span style=display:flex><span>
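+</span></span><span style=display:flex><span><span style=color:#75715e># register an Iceberg catalog backed by Nessie using Flink SQL DDL</span>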
+</span></span><span style=display:flex><span>table_env<span style=color:#f92672>.</span>execute_sql(<span style=color:#e6db74>&#34;CREATE CATALOG nessie_catalog WITH (&#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;type&#39;=&#39;iceberg&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;catalog-impl&#39;=&#39;org.apache.iceberg.nessie.NessieCatalog&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;uri&#39;=&#39;http://localhost:19120/api/v1&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;ref&#39;=&#39;main&#39;, &#34;</span>
+</span></span><span style=display:flex><span>                      <span style=color:#e6db74>&#34;&#39;warehouse&#39;=&#39;/path/to/warehouse&#39;)&#34;</span>)
+</span></span></code></pre></div><p>There is nothing special above about the <code>nessie</code> name. A Spark catalog can have any name; the important parts are the
+settings for <code>catalog-impl</code> and the configuration required to connect to Nessie correctly.
+Once you have a Nessie catalog you have access to your entire Nessie repo. You can then perform create/delete/merge
+operations on branches and perform commits on branches. Each Iceberg table in a Nessie Catalog is identified by an
+arbitrary-length namespace and table name (e.g. <code>data.base.name.table</code>). These namespaces are implicit and don&rsquo;t need to
+be created separately. Any transaction on a Nessie-enabled Iceberg table is a single commit in Nessie. Nessie commits
+can encompass an arbitrary number of actions on an arbitrary number of tables; however, in Iceberg this is limited
+to the set of single-table transactions currently available.</p><p>Further operations such as merges, viewing the commit log, or diffs are performed by direct interaction with the
+<code>NessieClient</code> in Java or by using the Python client or CLI. See <a href=https://projectnessie.org/tools/cli/>Nessie CLI</a> for
+more details on the CLI and <a href=https://projectnessie.org/tools/iceberg/spark/>Spark Guide</a> for a more complete description of
+Nessie functionality.</p><h2 id=nessie-and-iceberg>Nessie and Iceberg</h2><p>In most cases, Nessie acts just like any other catalog for Iceberg: providing a logical organization of a set of tables
+and providing atomicity to transactions. However, using Nessie opens up other interesting possibilities. When using Nessie with
+Iceberg every Iceberg transaction becomes a Nessie commit. This history can be listed, merged, or cherry-picked across branches.</p><h3 id=loosely-coupled-transactions>Loosely coupled transactions</h3><p>By creating a branch and performing a set of operations on that branch, you can approximate a multi-table transaction.
+A sequence of commits can be performed on the newly created branch and then merged back into the main branch atomically.
+This gives the appearance of a series of connected changes being exposed to the main branch simultaneously. While downstream
+consumers will see multiple transactions appear at once, this isn&rsquo;t a true multi-table transaction on the database. It is
+effectively a fast-forward merge of multiple commits (in Git terms) and each operation from the branch is its own distinct
+transaction and commit. This is different from a real multi-table transaction where all changes would be in the same commit.
+This does allow multiple applications to take part in modifying a branch and for this distributed set of transactions to be
+exposed to the downstream users simultaneously.</p><h3 id=experimentation>Experimentation</h3><p>Changes to a table can be tested in a branch before merging back into main. This is particularly useful when performing
+large changes like schema evolution or partition evolution. A partition evolution could be performed in a branch and you
+would be able to test the change (e.g. with performance benchmarks) before merging it. This provides great flexibility in
+performing on-line table modifications and testing without interrupting downstream use cases. If the changes are
+incorrect or not performant, the branch can be dropped without being merged.</p><h3 id=further-use-cases>Further use cases</h3><p>Please see the <a href=https://projectnessie.org/features/>Nessie Documentation</a> for further descriptions of
+Nessie features.</p><div class=danger>Regular table maintenance in Iceberg is complicated when using Nessie. Please consult
+<a href=https://projectnessie.org/features/management/>Management Services</a> before performing any
+<a href=../maintenance>table maintenance</a>.</div><h2 id=example>Example</h2><p>Please have a look at the <a href=https://github.com/projectnessie/nessie-demos>Nessie Demos repo</a>
+for different examples of Nessie and Iceberg in action together.</p><h2 id=future-improvements>Future Improvements</h2><ul><li>Iceberg multi-table transactions. Changes to multiple Iceberg tables in the same transaction, isolation levels etc</li></ul></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#enabling-nessie-catalog>Enabling Nessie Catalog</a></li><li><a href=#spark-sql-extensions>Spark SQL Extensions</a></li><li><a href=#nessie-catalog>Ne [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/partitioning/index.html b/docs/1.1.0/partitioning/index.html
new file mode 100644
index 00000000..93d4b79a
--- /dev/null
+++ b/docs/1.1.0/partitioning/index.html
@@ -0,0 +1,28 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Partitioning</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min. [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a id=active href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=partitioning>Partitioning</h1><h2 id=what-is-partitioni [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> event_time <span style=color:#66d9ef>BETWEEN</span> <span style=color:#e6db74>&#39;2018-12-01 10:00:00&#39;</span> <span style=color:#66d9ef>AND</span> <span style=color:#e6db74>&#39;2018-12-01 12:00:00&#39;</span>
+</span></span></code></pre></div><p>Configuring the <code>logs</code> table to partition by the date of <code>event_time</code> will group log events into files with the same event date. Iceberg keeps track of that date and will use it to skip files for other dates that don&rsquo;t have useful data.</p><p>Iceberg can partition timestamps at year, month, day, and hour granularity. It can also use a categorical column, like <code>level</code> in this logs example, to store rows together an [...]
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>SELECT</span> <span style=color:#66d9ef>level</span>, message, event_time, format_time(event_time, <span style=color:#e6db74>&#39;YYYY-MM-dd&#39;</span>)
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>FROM</span> unstructured_log_source
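+</span></span><span style=display:flex><span>  <span style=color:#75715e>-- the writer must compute and supply the event_date partition value</span>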
+</span></span></code></pre></div><p>Similarly, queries that search through the <code>logs</code> table must have an <code>event_date</code> filter in addition to an <code>event_time</code> filter.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#66d9ef>level</span>, <span style=color:# [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> event_time <span style=color:#66d9ef>BETWEEN</span> <span style=color:#e6db74>&#39;2018-12-01 10:00:00&#39;</span> <span style=color:#66d9ef>AND</span> <span style=color:#e6db74>&#39;2018-12-01 12:00:00&#39;</span>
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>AND</span> event_date <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;2018-12-01&#39;</span>
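+</span></span><span style=display:flex><span>  <span style=color:#75715e>-- without the event_date filter, Hive cannot prune files using event_time alone</span>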
+</span></span></code></pre></div><p>If the <code>event_date</code> filter were missing, Hive would scan through every file in the table because it doesn&rsquo;t know that the <code>event_time</code> column is related to the <code>event_date</code> column.</p><h3 id=problems-with-hive-partitioning>Problems with Hive partitioning</h3><p>Hive must be given partition values. In the logs example, it doesn&rsquo;t know the relationship between <code>event_time</code> and <code>event_date</code [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/performance/index.html b/docs/1.1.0/performance/index.html
new file mode 100644
index 00000000..5ec0fb23
--- /dev/null
+++ b/docs/1.1.0/performance/index.html
@@ -0,0 +1,20 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Performance</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.c [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a id=active href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=performance>Performance</h1><ul><li>Iceberg is designed [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/reliability/index.html b/docs/1.1.0/reliability/index.html
new file mode 100644
index 00000000..2855b1e8
--- /dev/null
+++ b/docs/1.1.0/reliability/index.html
@@ -0,0 +1,20 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Reliability</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.c [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a id=active href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=reliability>Reliability</h1><p>Iceberg was designed to  [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/schemas/index.html b/docs/1.1.0/schemas/index.html
new file mode 100644
index 00000000..72aaed9c
--- /dev/null
+++ b/docs/1.1.0/schemas/index.html
@@ -0,0 +1,20 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Schemas</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css r [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class="collapse in"><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a id=active href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=schemas>Schemas</h1><p>Iceberg tables support the follo [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/sitemap.xml b/docs/1.1.0/sitemap.xml
index b269432f..67d8d9af 100644
--- a/docs/1.1.0/sitemap.xml
+++ b/docs/1.1.0/sitemap.xml
@@ -1 +1 @@
-<?xml version="1.0" encoding="utf-8" standalone="yes"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"><url><loc>https://iceberg.apache.org/docs/1.1.0/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/categories/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/tags/</loc></url></urlset>
\ No newline at end of file
+<?xml version="1.0" encoding="utf-8" standalone="yes"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"><url><loc>https://iceberg.apache.org/docs/1.1.0/getting-started/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/hive/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/aws/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/categories/</loc></url><url><loc>https://iceberg.apache.org/docs/1.1.0/configurat [...]
\ No newline at end of file
diff --git a/docs/1.1.0/spark-configuration/index.html b/docs/1.1.0/spark-configuration/index.html
new file mode 100644
index 00000000..612f182f
--- /dev/null
+++ b/docs/1.1.0/spark-configuration/index.html
@@ -0,0 +1,45 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Configuration</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collaps [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class=collapse><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-configuration>Spark Configuration</h1><h2 id=cata [...]
+</span></span><span style=display:flex><span>spark.sql.catalog.hive_prod.type = hive
+</span></span><span style=display:flex><span>spark.sql.catalog.hive_prod.uri = thrift://metastore-host:port
+</span></span><span style=display:flex><span># omit uri to use the same URI as Spark: hive.metastore.uris in hive-site.xml
+</span></span></code></pre></div><p>Iceberg also supports a directory-based catalog in HDFS that can be configured using <code>type=hadoop</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-plain data-lang=plain><span style=display:flex><span>spark.sql.catalog.hadoop_prod = org.apache.iceberg.spark.SparkCatalog
+</span></span><span style=display:flex><span>spark.sql.catalog.hadoop_prod.type = hadoop
+</span></span><span style=display:flex><span>spark.sql.catalog.hadoop_prod.warehouse = hdfs://nn:8020/warehouse/path
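+</span></span><span style=display:flex><span># the warehouse path is where this catalog stores table metadata and data files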
+</span></span></code></pre></div><div class=info>The Hive-based catalog only loads Iceberg tables. To load non-Iceberg tables in the same Hive metastore, use a <a href=#replacing-the-session-catalog>session catalog</a>.</div><h3 id=catalog-configuration>Catalog configuration</h3><p>A catalog is created and named by adding a property <code>spark.sql.catalog.(catalog-name)</code> with an implementation class for its value.</p><p>Iceberg supplies two implementations:</p><ul><li><code>org.ap [...]
+</span></span></span></code></pre></div><p>Spark 3 keeps track of the current catalog and namespace, which can be omitted from table names.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span>USE hive_prod.db;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> <span style=color:#66d9ef>table</span> <span style=color:#75715e>-- load db.table from catalog hive_prod
+</span></span></span></code></pre></div><p>To see the current catalog and namespace, run <code>SHOW CURRENT NAMESPACE</code>.</p><h3 id=replacing-the-session-catalog>Replacing the session catalog</h3><p>To add Iceberg table support to Spark&rsquo;s built-in catalog, configure <code>spark_catalog</code> to use Iceberg&rsquo;s <code>SparkSessionCatalog</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code [...]
+</span></span><span style=display:flex><span>spark.sql.catalog.spark_catalog.type = hive
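+</span></span><span style=display:flex><span># non-Iceberg tables in the same metastore are still loaded by the built-in catalog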
+</span></span></code></pre></div><p>Spark&rsquo;s built-in catalog supports existing v1 and v2 tables tracked in a Hive Metastore. This configures Spark to use Iceberg&rsquo;s <code>SparkSessionCatalog</code> as a wrapper around that session catalog. When a table is not an Iceberg table, the built-in catalog will be used to load it instead.</p><p>This configuration allows the same Hive Metastore to be used for both Iceberg and non-Iceberg tables.</p><h3 id=using-catalog-specific-hadoop-configuration-v [...]
+</span></span></code></pre></div><h3 id=loading-a-custom-catalog>Loading a custom catalog</h3><p>Spark supports loading a custom Iceberg <code>Catalog</code> implementation by specifying the <code>catalog-impl</code> property. Here is an example:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-plain data-lang=plain><span style=display:flex><span>spark.sql.catalog.custom_prod = org.apache.ic [...]
+</span></span><span style=display:flex><span>spark.sql.catalog.custom_prod.catalog-impl = com.my.custom.CatalogImpl
+</span></span><span style=display:flex><span>spark.sql.catalog.custom_prod.my-additional-catalog-config = my-value
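+</span></span><span style=display:flex><span># additional properties are passed through to the custom catalog implementation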
+</span></span></code></pre></div><h3 id=catalogs-in-spark-24>Catalogs in Spark 2.4</h3><p>When using Iceberg 0.11.0 and later, Spark 2.4 can load tables from multiple Iceberg catalogs or from table locations.</p><p>Catalogs in 2.4 are configured just like catalogs in 3.x, but only Iceberg catalogs are supported.</p><h2 id=sql-extensions>SQL Extensions</h2><p>Iceberg 0.11.0 and later provide an extension module for Spark that adds new SQL commands, like <code>CALL</code> for stored procedures or  [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;snapshot-id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>10963874102873L</span><span style=color:#f92672>)</span>
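+</span></span><span style=display:flex><span>    <span style=color:#75715e>// pins this read to the given snapshot instead of the latest</span>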
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>table<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;catalog.db.table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><table><thead><tr><th>Spark option</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>snapshot-id</td><td>(latest)</td><td>Snapshot ID of the table snapshot to read</td></tr><tr><td>as-of-timestamp</td><td>(latest)</td><td>A timestamp in milliseconds; the snapshot used will be the snapshot current at this time.</td></tr><tr><td>split-size</td><td>As per table property</td><td>Overrides this table&rsquo;s read.split.target-size and read.sp [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>df<span style=color:#f92672>.</span>write
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;write-format&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;avro&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;snapshot-property.key&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;value&#34;</span><span style=color:#f92672>)</span>
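+</span></span><span style=display:flex><span>    <span style=color:#75715e>// options prefixed with snapshot-property. are recorded in the snapshot summary</span>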
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>insertInto<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;catalog.db.table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><table><thead><tr><th>Spark option</th><th>Default</th><th>Description</th></tr></thead><tbody><tr><td>write-format</td><td>Table write.format.default</td><td>File format to use for this write operation; parquet, avro, or orc</td></tr><tr><td>target-file-size-bytes</td><td>As per table property</td><td>Overrides this table&rsquo;s write.target-file-size-bytes</td></tr><tr><td>check-nullability</td><td>true</td><td>Sets the nullable check on fields</td></t [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark-ddl/index.html b/docs/1.1.0/spark-ddl/index.html
new file mode 100644
index 00000000..eb713cde
--- /dev/null
+++ b/docs/1.1.0/spark-ddl/index.html
@@ -0,0 +1,134 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>DDL</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css rel=s [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a id=active href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-ddl>Spark DDL</h1><p>To use Iceberg in Spark, fir [...]
+</span></span><span style=display:flex><span>    id bigint <span style=color:#66d9ef>COMMENT</span> <span style=color:#e6db74>&#39;unique id&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span></code></pre></div><p>Iceberg will convert each Spark column type to the corresponding Iceberg type. Please check the section on <a href=../spark-writes#spark-type-to-iceberg-type>type compatibility when creating tables</a> for details.</p><p>Table create commands, including CTAS and RTAS, support the full range of Spark create clauses, including:</p><ul><li><code>PARTITIONED BY (partition-expressions)</code> to configure partitioning</li><li><code>LOCATION '(fully-qualified-uri [...]
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (category)
+</span></span></code></pre></div><p>The <code>PARTITIONED BY</code> clause supports transform expressions to create <a href=../partitioning>hidden partitions</a>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample (
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string,
+</span></span><span style=display:flex><span>    ts <span style=color:#66d9ef>timestamp</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (bucket(<span style=color:#ae81ff>16</span>, id), days(ts), category)
+</span></span></code></pre></div><p>Supported transformations are:</p><ul><li><code>years(ts)</code>: partition by year</li><li><code>months(ts)</code>: partition by month</li><li><code>days(ts)</code> or <code>date(ts)</code>: equivalent to dateint partitioning</li><li><code>hours(ts)</code> or <code>date_hour(ts)</code>: equivalent to dateint and hour partitioning</li><li><code>bucket(N, col)</code>: partition by hashed value mod N buckets</li><li><code>truncate(L, col)</code>: partiti [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>SELECT</span> ...
+</span></span></code></pre></div><p>The newly created table won&rsquo;t inherit the partition spec and table properties from the source table in SELECT; use PARTITIONED BY and TBLPROPERTIES in CTAS to declare a partition spec and table properties for the new table.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9e [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (part)
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;key&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;value&#39;</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>SELECT</span> ...
+</span></span></code></pre></div><h2 id=replace-table--as-select><code>REPLACE TABLE ... AS SELECT</code></h2><p>Iceberg supports RTAS as an atomic operation when using a <a href=../spark-configuration#catalog-configuration><code>SparkCatalog</code></a>. RTAS is supported, but is not atomic when using <a href=../spark-configuration#replacing-the-session-catalog><code>SparkSessionCatalog</code></a>.</p><p>Atomic table replacement creates a new snapshot with the results of the <code>SELECT [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>SELECT</span> ...
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>REPLACE</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (part)
+</span></span><span style=display:flex><span>TBLPROPERTIES (<span style=color:#e6db74>&#39;key&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;value&#39;</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>SELECT</span> ...
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CREATE</span> <span style=color:#66d9ef>OR</span> <span style=color:#66d9ef>REPLACE</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span><span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>SELECT</span> ...
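+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- a sketch: to rewrite data without replacing the table&#39;s schema or partition spec, use INSERT OVERWRITE instead</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> OVERWRITE prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> ...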
+</span></span></code></pre></div><p>The schema and partition spec will be replaced if changed. To avoid modifying the table&rsquo;s schema and partitioning, use <code>INSERT OVERWRITE</code> instead of <code>REPLACE TABLE</code>.
+The new table properties in the <code>REPLACE TABLE</code> command will be merged with any existing table properties. Existing table properties will be updated if changed; otherwise, they are preserved.</p><h2 id=drop-table><code>DROP TABLE</code></h2><p>The drop table behavior changed in 0.14.</p><p>Prior to 0.14, running <code>DROP TABLE</code> would remove the table from the catalog and delete the table contents as well.</p><p>From 0.14 onwards, <code>DROP TABLE</code> would only remove  [...]
+In order to delete the table contents <code>DROP TABLE PURGE</code> should be used.</p><h3 id=drop-table-1><code>DROP TABLE</code></h3><p>To drop the table from the catalog, run:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>DROP</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span></code></pre></div><h3 id=drop-table-purge><code>DROP TABLE PURGE</code></h3><p>To drop the table from the catalog and delete the table&rsquo;s contents, run:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>DROP</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample PURGE
+</span></span></code></pre></div><h2 id=alter-table><code>ALTER TABLE</code></h2><p>Iceberg has full <code>ALTER TABLE</code> support in Spark 3, including:</p><ul><li>Renaming a table</li><li>Setting or removing table properties</li><li>Adding, deleting, and renaming columns</li><li>Adding, deleting, and renaming nested fields</li><li>Reordering top-level columns and nested struct fields</li><li>Widening the type of <code>int</code>, <code>float</code>, and <code>decimal</code> fields</ [...]
+</span></span></code></pre></div><h3 id=alter-table--set-tblproperties><code>ALTER TABLE ... SET TBLPROPERTIES</code></h3><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>SET</span> TBLPROPERTIES (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;read.split.target-size&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;268435456&#39;</span>
+</span></span><span style=display:flex><span>)
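+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- a sketch: several properties can be set in one statement; &#39;commit.retry.num-retries&#39; is a standard Iceberg write property</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>SET</span> TBLPROPERTIES (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;read.split.target-size&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;268435456&#39;</span>,
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;commit.retry.num-retries&#39;</span><span style=color:#f92672>=</span><span style=color:#e6db74>&#39;10&#39;</span>
+</span></span><span style=display:flex><span>)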
+</span></span></code></pre></div><p>Iceberg uses table properties to control table behavior. For a list of available properties, see <a href=../configuration>Table configuration</a>.</p><p><code>UNSET</code> is used to remove properties:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:# [...]
+</span></span></code></pre></div><p><code>SET TBLPROPERTIES</code> can also be used to set the table comment (description):</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>SET</span> TBLPROPERTIES (
+</span></span><span style=display:flex><span>    <span style=color:#e6db74>&#39;comment&#39;</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;A table comment.&#39;</span>
+</span></span><span style=display:flex><span>)
+</span></span></code></pre></div><h3 id=alter-table--add-column><code>ALTER TABLE ... ADD COLUMN</code></h3><p>To add a column to Iceberg, use the <code>ADD COLUMNS</code> clause with <code>ALTER TABLE</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> COLUMNS (
+</span></span><span style=display:flex><span>    new_column string <span style=color:#66d9ef>comment</span> <span style=color:#e6db74>&#39;new_column docs&#39;</span>
+</span></span><span style=display:flex><span>  )
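+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- a sketch with hypothetical column names: several columns can be added at once, separated by commas</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> COLUMNS (
+</span></span><span style=display:flex><span>    new_column_1 string <span style=color:#66d9ef>comment</span> <span style=color:#e6db74>&#39;docs for new_column_1&#39;</span>,
+</span></span><span style=display:flex><span>    new_column_2 bigint
+</span></span><span style=display:flex><span>  )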
+</span></span></code></pre></div><p>Multiple columns can be added at the same time, separated by commas.</p><p>Nested columns should be identified using the full column name:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- create a struct column
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> point struct<span style=color:#f92672>&lt;</span>x: double, y: double<span style=color:#f92672>&gt;</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- add a field to the struct
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> point.z double
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- create a nested array column of struct
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> points array<span style=color:#f92672>&lt;</span>struct<span style=color:#f92672>&lt;</span>x: double, y: double<span style=color:#f92672>&gt;&gt;</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- add a field to the struct within an array. Using keyword &#39;element&#39; to access the array&#39;s element column.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> points.element.z double
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- create a map column of struct key and struct value
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> points <span style=color:#66d9ef>map</span><span style=color:#f92672>&lt;</span>struct<span style=color:#f92672>&lt;</span>x: int<span style=color:#f92672>&gt;</span>, struct<span style=color:#f92672>&lt;</span>a: int<span style=color:#f92672>&gt;&gt;</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- add a field to the value struct in a map. Using keyword &#39;value&#39; to access the map&#39;s value column.
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> points.value.b int
+</span></span></code></pre></div><p>Note: Altering a map &lsquo;key&rsquo; column by adding columns is not allowed. Only map values can be updated.</p><p>In Spark 2.4.4 and later, you can add columns in any position by adding <code>FIRST</code> or <code>AFTER</code> clauses:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#6 [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> new_column bigint <span style=color:#66d9ef>AFTER</span> other_column
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ADD</span> <span style=color:#66d9ef>COLUMN</span> nested.new_column bigint <span style=color:#66d9ef>FIRST</span>
+</span></span></code></pre></div><h3 id=alter-table--rename-column><code>ALTER TABLE ... RENAME COLUMN</code></h3><p>Iceberg allows any field to be renamed. To rename a field, use <code>RENAME COLUMN</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sa [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>RENAME</span> <span style=color:#66d9ef>COLUMN</span> <span style=color:#66d9ef>location</span>.lat <span style=color:#66d9ef>TO</span> latitude
+</span></span></code></pre></div><p>Note that nested rename commands only rename the leaf field. The above command renames <code>location.lat</code> to <code>location.latitude</code>.</p><h3 id=alter-table--alter-column><code>ALTER TABLE ... ALTER COLUMN</code></h3><p>Alter column is used to widen types, make a field optional, set comments, and reorder fields.</p><p>Iceberg allows updating column types if the update is safe. Safe updates are:</p><ul><li><code>int</code> to <code>bigint</c [...]
+</span></span></code></pre></div><p>To add or remove columns from a struct, use <code>ADD COLUMN</code> or <code>DROP COLUMN</code> with a nested column name.</p><p>Column comments can also be updated using <code>ALTER COLUMN</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9 [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>COLUMN</span> measurement <span style=color:#66d9ef>COMMENT</span> <span style=color:#e6db74>&#39;unit is kilobytes per second&#39;</span>
+</span></span></code></pre></div><p>Iceberg allows reordering top-level columns or columns in a struct using <code>FIRST</code> and <code>AFTER</code> clauses:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ALTER</span> <spa [...]
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>COLUMN</span> nested.col <span style=color:#66d9ef>AFTER</span> other_col
+</span></span></code></pre></div><p>Nullability can be changed using <code>SET NOT NULL</code> and <code>DROP NOT NULL</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>COLUMN</spa [...]
+</span></span></code></pre></div><div class=info><code>ALTER COLUMN</code> is not used to update <code>struct</code> types. Use <code>ADD COLUMN</code> and <code>DROP COLUMN</code> to add or remove struct fields.</div><h3 id=alter-table--drop-column><code>ALTER TABLE ... DROP COLUMN</code></h3><p>To drop columns, use <code>ALTER TABLE ... DROP COLUMN</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>DROP</span> <span style=color:#66d9ef>COLUMN</span> point.z
+</span></span></code></pre></div><h2 id=alter-table-sql-extensions><code>ALTER TABLE</code> SQL extensions</h2><p>These commands are available in Spark 3 when using Iceberg <a href=../spark-configuration#sql-extensions>SQL extensions</a>.</p><h3 id=alter-table--add-partition-field><code>ALTER TABLE ... ADD PARTITION FIELD</code></h3><p>Iceberg supports adding new partition fields to a spec using <code>ADD PARTITION FIELD</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2 [...]
+</span></span></span></code></pre></div><p><a href=#partitioned-by>Partition transforms</a> are also supported:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ADD</span> PARTITION FIELD bucket(<span style=color:#ae81ff>16</s [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ADD</span> PARTITION FIELD <span style=color:#66d9ef>truncate</span>(<span style=color:#66d9ef>data</span>, <span style=color:#ae81ff>4</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ADD</span> PARTITION FIELD years(ts)
+</span></span><span style=display:flex><span><span style=color:#75715e>-- use optional AS keyword to specify a custom name for the partition field 
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>ADD</span> PARTITION FIELD bucket(<span style=color:#ae81ff>16</span>, id) <span style=color:#66d9ef>AS</span> shard
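+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- a sketch: inspect per-file partition values through the files metadata table;</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- files written before the spec change show null for the newly added field</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.sample.files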
+</span></span></code></pre></div><p>Adding a partition field is a metadata operation and does not change any of the existing table data. New data will be written with the new partitioning, but existing data will remain in the old partition layout. Old data files will have null values for the new partition fields in metadata tables.</p><p>Dynamic partition overwrite behavior will change when the table&rsquo;s partitioning changes because dynamic overwrite replaces partitions implicitly. T [...]
+For example, if you partition by days and move to partitioning by hours, overwrites will replace hourly partitions but will no longer replace entire days.</div><h3 id=alter-table--drop-partition-field><code>ALTER TABLE ... DROP PARTITION FIELD</code></h3><p>Partition fields can be removed using <code>DROP PARTITION FIELD</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style= [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>DROP</span> PARTITION FIELD bucket(<span style=color:#ae81ff>16</span>, id)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>DROP</span> PARTITION FIELD <span style=color:#66d9ef>truncate</span>(<span style=color:#66d9ef>data</span>, <span style=color:#ae81ff>4</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>DROP</span> PARTITION FIELD years(ts)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>DROP</span> PARTITION FIELD shard
+</span></span></code></pre></div><p>Note that although the partition is removed, the column will still exist in the table schema.</p><p>Dropping a partition field is a metadata operation and does not change any of the existing table data. New data will be written with the new partitioning, but existing data will remain in the old partition layout.</p><div class=danger><strong>Dynamic partition overwrite behavior will change</strong> when partitioning changes.
+For example, if you partition by days and move to partitioning by hours, overwrites will replace hourly partitions but will no longer replace entire days.</div><div class=danger>Be careful when dropping a partition field because it will change the schema of metadata tables, like <code>files</code>, and may cause metadata queries to fail or produce different results.</div><h3 id=alter-table--replace-partition-field><code>ALTER TABLE ... REPLACE PARTITION FIELD</code></h3><p>A partition field can be repla [...]
+</span></span><span style=display:flex><span><span style=color:#75715e>-- use optional AS keyword to specify a custom name for the new partition field 
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>REPLACE</span> PARTITION FIELD ts_day <span style=color:#66d9ef>WITH</span> days(ts) <span style=color:#66d9ef>AS</span> day_of_ts
+</span></span></code></pre></div><h3 id=alter-table--write-ordered-by><code>ALTER TABLE ... WRITE ORDERED BY</code></h3><p>Iceberg tables can be configured with a sort order that is used to automatically sort data that is written to the table in some engines. For example, <code>MERGE INTO</code> in Spark will use the table ordering.</p><p>To set the write order for a table, use <code>WRITE ORDERED BY</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#27 [...]
+</span></span><span style=display:flex><span><span style=color:#75715e>-- use optional ASC/DEC keyword to specify sort order of each field (default ASC)
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>WRITE</span> ORDERED <span style=color:#66d9ef>BY</span> category <span style=color:#66d9ef>ASC</span>, id <span style=color:#66d9ef>DESC</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- use optional NULLS FIRST/NULLS LAST keyword to specify null order of each field (default FIRST)
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>WRITE</span> ORDERED <span style=color:#66d9ef>BY</span> category <span style=color:#66d9ef>ASC</span> NULLS <span style=color:#66d9ef>LAST</span>, id <span style=color:#66d9ef>DESC</span> NULLS <span style=color:#66d9ef>FIRST</span>
+</span></span></code></pre></div><div class=info>Table write order does not guarantee data order for queries. It only affects how data is written to the table.</div><p><code>WRITE ORDERED BY</code> sets a global ordering where rows are ordered across tasks, like using <code>ORDER BY</code> in an <code>INSERT</code> command:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql>< [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span>, category, ts <span style=color:#66d9ef>FROM</span> another_table
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ORDER</span> <span style=color:#66d9ef>BY</span> ts, category
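+</span></span><span style=display:flex><span><span style=color:#75715e>-- note: once a table write order is set, engines such as Spark can apply an equivalent sort automatically (e.g. for MERGE INTO)</span>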
+</span></span></code></pre></div><p>To order within each task, not across tasks, use <code>LOCALLY ORDERED BY</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <span style=color:#66d9ef>WRITE</span> LOCALLY ORDERED <span style=color:#66d9ef>BY</ [...]
+</span></span></code></pre></div><h3 id=alter-table--write-distributed-by-partition><code>ALTER TABLE ... WRITE DISTRIBUTED BY PARTITION</code></h3><p><code>WRITE DISTRIBUTED BY PARTITION</code> will request that each partition is handled by one writer; the default implementation is hash distribution.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex [...]
+</span></span></code></pre></div><p><code>DISTRIBUTED BY PARTITION</code> and <code>LOCALLY ORDERED BY</code> may be used together, to distribute by partition and locally order rows within each task.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>ALTER</span> <span style=color:#66d9ef>TABLE</span> prod.db.sample <sp [...]
+</span></span></code></pre></div></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#create-table><code>CREATE TABLE</code></a><ul><li><a href=#partitioned-by><code>PARTITIONED BY</code></a></li></ul></li><li><a href=#create-table--as-select><code>CREATE TABLE ... AS SELECT</code></a></li><li><a href=#replace-table--as-select><code>REPLACE TABLE ... AS SELECT</code></a></li><li><a href=#drop-table><code>DROP TABLE</code></a><ul><li><a href=#drop-ta [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark-procedures/index.html b/docs/1.1.0/spark-procedures/index.html
new file mode 100644
index 00000000..d027ac11
--- /dev/null
+++ b/docs/1.1.0/spark-procedures/index.html
@@ -0,0 +1,80 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Procedures</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.cs [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a id=active href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-procedures>Spark Procedures</h1><p>To use Iceberg [...]
+</span></span></code></pre></div><h3 id=positional-arguments>Positional arguments</h3><p>When passing arguments by position, only the ending arguments may be omitted if they are optional.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9e [...]
+</span></span></code></pre></div><h2 id=snapshot-management>Snapshot management</h2><h3 id=rollback_to_snapshot><code>rollback_to_snapshot</code></h3><p>Roll back a table to a specific snapshot ID.</p><p>To roll back to a specific time, use <a href=#rollback_to_timestamp><code>rollback_to_timestamp</code></a>.</p><div class=info>This procedure invalidates all cached Spark plans that reference the affected table.</div><h4 id=usage-1>Usage</h4><table><thead><tr><th>Argument Name</th><th>Re [...]
+</span></span></code></pre></div><h3 id=rollback_to_timestamp><code>rollback_to_timestamp</code></h3><p>Roll back a table to the snapshot that was current at some time.</p><div class=info>This procedure invalidates all cached Spark plans that reference the affected table.</div><h4 id=usage-2>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the table [...]
+</span></span></code></pre></div><h3 id=set_current_snapshot><code>set_current_snapshot</code></h3><p>Sets the current snapshot ID for a table.</p><p>Unlike rollback, the snapshot is not required to be an ancestor of the current table state.</p><div class=info>This procedure invalidates all cached Spark plans that reference the affected table.</div><h4 id=usage-3>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr>< [...]
+</span></span></code></pre></div><h3 id=cherrypick_snapshot><code>cherrypick_snapshot</code></h3><p>Cherry-picks changes from a snapshot into the current table state.</p><p>Cherry-picking creates a new snapshot from an existing snapshot without altering or removing the original.</p><p>Only append and dynamic overwrite snapshots can be cherry-picked.</p><div class=info>This procedure invalidates all cached Spark plans that reference the affected table.</div><h4 id=usage-4>Usage</h4><table [...]
+</span></span></code></pre></div><p>Cherry-pick snapshot 1 with named args</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.cherrypick_snapshot(snapshot_id <span style=color:#f92672>=&gt;</span> <span style=color:#ae81ff> [...]
+</span></span></code></pre></div><h2 id=metadata-management>Metadata management</h2><p>Many <a href=../maintenance>maintenance actions</a> can be performed using Iceberg stored procedures.</p><h3 id=expire_snapshots><code>expire_snapshots</code></h3><p>Each write/update/delete/upsert/compaction in Iceberg produces a new snapshot while keeping the old data and metadata
+around for snapshot isolation and time travel. The <code>expire_snapshots</code> procedure can be used to remove older snapshots
+and their files which are no longer needed.</p><p>This procedure will remove old snapshots and data files which are uniquely required by those old snapshots. This means
+the <code>expire_snapshots</code> procedure will never remove files which are still required by a non-expired snapshot.</p><h4 id=usage-5>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the table to update</td></tr><tr><td><code>older_than</code></td><td>️</td><td>timestamp</td><td>Timestamp before which snapshots will be removed (Default: 5 days a [...]
+</span></span></code></pre></div><h3 id=remove_orphan_files><code>remove_orphan_files</code></h3><p>Used to remove files which are not referenced in any metadata files of an Iceberg table and can thus be considered &ldquo;orphaned&rdquo;.</p><h4 id=usage-6>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the table to clean</td></tr><tr><td><code>old [...]
+</span></span></code></pre></div><p>Remove any files in the <code>tablelocation/data</code> folder which are not known to the table <code>db.sample</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.remove_orphan_fi [...]
+</span></span></code></pre></div><h3 id=rewrite_data_files><code>rewrite_data_files</code></h3><p>Iceberg tracks each data file in a table. More data files lead to more metadata stored in manifest files, and small data files cause an unnecessary amount of metadata and less efficient queries from file open costs.</p><p>Iceberg can compact data files in parallel using Spark with the <code>rewriteDataFiles</code> action. This will combine small files into larger files to reduce metadata o [...]
+and <a href=../../../javadoc/1.0.0/org/apache/iceberg/actions/SortStrategy.html#field.summary><code>SortStrategy</code> Javadoc</a>
+for a list of all supported options for this action.</p><h4 id=output-6>Output</h4><table><thead><tr><th>Output Name</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>rewritten_data_files_count</code></td><td>int</td><td>Number of data files which were re-written by this command</td></tr><tr><td><code>added_data_files_count</code></td><td>int</td><td>Number of new data files which were written by this command</td></tr></tbody></table><h4 id=examples-3>Examples</h4><p>Rew [...]
+and also split large files according to the default write size of the table.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.rewrite_data_files(<span style=color:#e6db74>&#39;db.sample&#39;</span>)
+</span></span></code></pre></div><p>Rewrite the data files in table <code>db.sample</code> by sorting all the data on id and name
+using the same defaults as bin-pack to determine which files to rewrite.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.rewrite_data_files(<span style=color:#66d9ef>table</span> <span style=color:#f92672>=&gt;</span> <s [...]
+</span></span></code></pre></div><p>Rewrite the data files in table <code>db.sample</code> by Z-ordering on columns c1 and c2,
+using the same defaults as bin-pack to determine which files to rewrite.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.rewrite_data_files(<span style=color:#66d9ef>table</span> <span style=color:#f92672>=&gt;</span> <s [...]
+</span></span></code></pre></div><p>Rewrite the data files in table <code>db.sample</code> using the bin-pack strategy in any partition where 2 or more files need to be rewritten.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef> [...]
+</span></span></code></pre></div><p>Rewrite the data files in table <code>db.sample</code> and select the files that may contain data matching the filter (id = 3 and name = &ldquo;foo&rdquo;) to be rewritten.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<sp [...]
+</span></span></code></pre></div><h3 id=rewrite_manifests><code>rewrite_manifests</code></h3><p>Rewrite manifests for a table to optimize scan planning.</p><p>Data files in manifests are sorted by fields in the partition spec. This procedure runs in parallel using a Spark job.</p><p>See the <a href=../../../javadoc/1.0.0/org/apache/iceberg/actions/RewriteManifests.html><code>RewriteManifests</code> Javadoc</a>
+to see more configuration options.</p><div class=info>This procedure invalidates all cached Spark plans that reference the affected table.</div><h4 id=usage-8>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the table to update</td></tr><tr><td><code>use_caching</code></td><td>️</td><td>boolean</td><td>Use Spark caching during operation (defaults to [...]
+</span></span></code></pre></div><p>Rewrite the manifests in table <code>db.sample</code> and disable the use of Spark caching. This could be done to avoid memory issues on executors.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>sy [...]
+</span></span></code></pre></div><h2 id=table-migration>Table migration</h2><p>The <code>snapshot</code> and <code>migrate</code> procedures help test and migrate existing Hive or Spark tables to Iceberg.</p><h3 id=snapshot><code>snapshot</code></h3><p>Create a lightweight temporary copy of a table for testing, without changing the source table.</p><p>The newly created table can be changed or written to without affecting the source table, but the snapshot uses the original table&rsquo;s [...]
+actions like <code>expire_snapshots</code> which would physically delete data files. Iceberg deletes, which only affect metadata,
+are still allowed. In addition, any operations which affect the original data files will disrupt the snapshot table&rsquo;s
+integrity. DELETE statements executed against the original Hive table will remove original data files and the
+<code>snapshot</code> table will no longer be able to access them.</div><p>See <a href=#migrate><code>migrate</code></a> to replace an existing table with an Iceberg table.</p><h4 id=usage-9>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>source_table</code></td><td>✔️</td><td>string</td><td>Name of the table to snapshot</td></tr><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the  [...]
+catalog&rsquo;s default location for <code>db.snap</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.snapshot(<span style=color:#e6db74>&#39;db.sample&#39;</span>, <span style=color:#e6db74>&#39;db.snap&#39;</span>)
+</span></span></code></pre></div><p>Create an isolated Iceberg table named <code>db.snap</code> which references table <code>db.sample</code>, at
+a manually specified location <code>/tmp/temptable/</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.snapshot(<span style=color:#e6db74>&#39;db.sample&#39;</span>, <span style=color:#e6db74>&#39;db.snap&#39;</span [...]
+</span></span></code></pre></div><h3 id=migrate><code>migrate</code></h3><p>Replace a table with an Iceberg table, loaded with the source&rsquo;s data files.</p><p>Table schema, partitioning, properties, and location will be copied from the source table.</p><p>Migrate will fail if any table partition uses an unsupported format. Supported formats are Avro, Parquet, and ORC.
+Existing data files are added to the Iceberg table&rsquo;s metadata and can be read using a name-to-id mapping created from the original table schema.</p><p>To leave the original table intact while testing, use <a href=#snapshot><code>snapshot</code></a> to create a new temporary table that shares the source data files and schema.</p><p>By default, the original table is retained with the name <code>table_BACKUP_</code>.</p><h4 id=usage-10>Usage</h4><table><thead><tr><th>Argument Name</th><th>R [...]
+</span></span></code></pre></div><p>Migrate <code>db.sample</code> in the current catalog to an Iceberg table without adding any additional properties:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> <span style=color:#66d9ef>catalog_name</span>.<span style=color:#66d9ef>system</span>.migrate(<span style= [...]
+</span></span></code></pre></div><h3 id=add_files><code>add_files</code></h3><p>Attempts to directly add files from a Hive or file-based table into a given Iceberg table. Unlike migrate or
+snapshot, <code>add_files</code> can import files from a specific partition or partitions and does not create a new Iceberg table.
+This command will create metadata for the new files and will not move them. This procedure will not analyze the schema
+of the files to determine if they actually match the schema of the Iceberg table. Upon completion, the Iceberg table
+will then treat these files as if they are part of the set of files owned by Iceberg. This means any subsequent
+<code>expire_snapshots</code> calls will be able to physically delete the added files. This method should not be used if
+<code>migrate</code> or <code>snapshot</code> are possible.</p><h4 id=usage-11>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Table to which files will be added</td></tr><tr><td><code>source_table</code></td><td>✔️</td><td>string</td><td>Table where files should come from; paths are also possible in the form of `file_format`.`path`</td></tr><tr><td><cod [...]
+<code>db.tbl</code>. Only add files that exist within partitions where <code>part_col_1</code> is equal to <code>A</code>.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> spark_catalog.<span style=color:#66d9ef>system</span>.add_files(
+</span></span><span style=display:flex><span><span style=color:#66d9ef>table</span> <span style=color:#f92672>=&gt;</span> <span style=color:#e6db74>&#39;db.tbl&#39;</span>,
+</span></span><span style=display:flex><span>source_table <span style=color:#f92672>=&gt;</span> <span style=color:#e6db74>&#39;db.src_tbl&#39;</span>,
+</span></span><span style=display:flex><span>partition_filter <span style=color:#f92672>=&gt;</span> <span style=color:#66d9ef>map</span>(<span style=color:#e6db74>&#39;part_col_1&#39;</span>, <span style=color:#e6db74>&#39;A&#39;</span>)
+</span></span><span style=display:flex><span>)
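+</span></span><span style=display:flex><span><span style=color:#75715e>-- a sketch: filter on more partition columns by adding map entries, e.g. map(&#39;part_col_1&#39;, &#39;A&#39;, &#39;part_col_2&#39;, &#39;B&#39;)</span>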
+</span></span></code></pre></div><p>Add files from a <code>parquet</code> file based table at location <code>path/to/table</code> to the Iceberg table <code>db.tbl</code>. Add all
+files regardless of what partition they belong to.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> spark_catalog.<span style=color:#66d9ef>system</span>.add_files(
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>table</span> <span style=color:#f92672>=&gt;</span> <span style=color:#e6db74>&#39;db.tbl&#39;</span>,
+</span></span><span style=display:flex><span>  source_table <span style=color:#f92672>=&gt;</span> <span style=color:#e6db74>&#39;`parquet`.`path/to/table`&#39;</span>
+</span></span><span style=display:flex><span>)
+</span></span></code></pre></div><h2 id=metadata-information><code>Metadata information</code></h2><h3 id=ancestors_of><code>ancestors_of</code></h3><p>Report the live snapshot IDs of parents of a specified snapshot.</p><h4 id=usage-12>Usage</h4><table><thead><tr><th>Argument Name</th><th>Required?</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>table</code></td><td>✔️</td><td>string</td><td>Name of the table to report live snapshot IDs</td></tr><tr><td><code>snapsh [...]
+</span></span><span style=display:flex><span>      \ -&gt; C&#39; -&gt; (D&#39;)
+</span></span></code></pre></div><p>Not specifying the snapshot ID would return A -> B -> C&rsquo; -> D&rsquo;, while providing the snapshot ID of
+D as an argument would return A -> B -> C -> D.</p></blockquote><h4 id=output-10>Output</h4><table><thead><tr><th>Output Name</th><th>Type</th><th>Description</th></tr></thead><tbody><tr><td><code>snapshot_id</code></td><td>long</td><td>the ancestor snapshot id</td></tr><tr><td><code>timestamp</code></td><td>long</td><td>snapshot creation time</td></tr></tbody></table><h4 id=examples-8>Examples</h4><p>Get all the snapshot ancestors of the current snapshot (default)</p><div class=highlight><pre [...]
+</span></span></code></pre></div><p>Get all the snapshot ancestors by a particular snapshot</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CALL</span> spark_catalog.<span style=color:#66d9ef>system</span>.ancestors_of(<span style=color:#e6db74>&#39;db.tbl&#39;</span>, <span style=color:#ae81ff>1</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>CALL</span> spark_catalog.<span style=color:#66d9ef>system</span>.ancestors_of(snapshot_id <span style=color:#f92672>=&gt;</span> <span style=color:#ae81ff>1</span>, <span style=color:#66d9ef>table</span> <span style=color:#f92672>=&gt;</span> <span style=color:#e6db74>&#39;db.tbl&#39;</span>)
+</span></span></code></pre></div></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#usage>Usage</a><ul><li><a href=#named-arguments>Named arguments</a></li><li><a href=#positional-arguments>Positional arguments</a></li></ul></li><li><a href=#snapshot-management>Snapshot management</a><ul><li><a href=#rollback_to_snapshot><code>rollback_to_snapshot</code></a></li><li><a href=#rollback_to_timestamp><code>rollback_to_timestamp</code></a></li><li><a h [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark-queries/index.html b/docs/1.1.0/spark-queries/index.html
new file mode 100644
index 00000000..b6e2ee4d
--- /dev/null
+++ b/docs/1.1.0/spark-queries/index.html
@@ -0,0 +1,95 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Queries</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css r [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a id=active href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-queries>Spark Queries</h1><p>To use Iceberg in Sp [...]
+</span></span></span></code></pre></div><p>Metadata tables, like <code>history</code> and <code>snapshots</code>, can use the Iceberg table name as a namespace.</p><p>For example, to read from the <code>files</code> metadata table for <code>prod.db.table</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT< [...]
+</span></span></code></pre></div><table><thead><tr><th>content</th><th>file_path</th><th>file_format</th><th>spec_id</th><th>partition</th><th>record_count</th><th>file_size_in_bytes</th><th>column_sizes</th><th>value_counts</th><th>null_value_counts</th><th>nan_value_counts</th><th>lower_bounds</th><th>upper_bounds</th><th>key_metadata</th><th>split_offsets</th><th>equality_ids</th><th>sort_order_id</th></tr></thead><tbody><tr><td>0</td><td>s3:/&mldr;/table/data/00000-3-8d6d60e8-d427-48 [...]
+</span></span></code></pre></div><h3 id=catalogs-with-dataframereader>Catalogs with DataFrameReader</h3><p>Iceberg 0.11.0 adds multi-catalog support to <code>DataFrameReader</code> in both Spark 3 and 2.4.</p><p>Paths and table names can be loaded with Spark&rsquo;s <code>DataFrameReader</code> interface. How tables are loaded depends on how
+the identifier is specified. When using <code>spark.read.format("iceberg").load(table)</code> or <code>spark.table(table)</code>, the <code>table</code>
+variable can take a number of forms as listed below:</p><ul><li><code>file:///path/to/table</code>: loads a HadoopTable at given path</li><li><code>tablename</code>: loads <code>currentCatalog.currentNamespace.tablename</code></li><li><code>catalog.tablename</code>: loads <code>tablename</code> from the specified catalog.</li><li><code>namespace.tablename</code>: loads <code>namespace.tablename</code> from current catalog</li><li><code>catalog.namespace.tablename</code>: loads <code>name [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>TIMESTAMP</span> <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#e6db74>&#39;1986-10-26 01:21:00&#39;</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- time travel to snapshot with id 10963874102873L
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>VERSION</span> <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>10963874102873</span>;
+</span></span></code></pre></div><p>In addition, <code>FOR SYSTEM_TIME AS OF</code> and <code>FOR SYSTEM_VERSION AS OF</code> clauses are also supported:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66 [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>FOR</span> SYSTEM_VERSION <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>10963874102873</span>;
+</span></span></code></pre></div><p>Timestamps may also be supplied as a Unix timestamp, in seconds:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- timestamp in seconds
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>TIMESTAMP</span> <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>499162860</span>;
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>FOR</span> SYSTEM_TIME <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>499162860</span>;
+</span></span></code></pre></div><h4 id=dataframe>DataFrame</h4><p>To select a specific table snapshot or the snapshot at some time in the DataFrame API, Iceberg supports two Spark read options:</p><ul><li><code>snapshot-id</code> selects a specific table snapshot</li><li><code>as-of-timestamp</code> selects the current snapshot at a timestamp, in milliseconds</li></ul><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;as-of-timestamp&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;499162860000&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path/to/table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#75715e>// time travel to snapshot with ID 10963874102873L
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;snapshot-id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>10963874102873L</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path/to/table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><div class=info>Spark 3.0 and earlier versions do not support using <code>option</code> with <code>table</code> in DataFrameReader commands. All options will be silently
+ignored. Do not use <code>table</code> when attempting to time-travel or use other options. See <a href=https://issues.apache.org/jira/browse/SPARK-32592>SPARK-32592</a>.</div><h3 id=incremental-read>Incremental read</h3><p>To read appended data incrementally, use:</p><ul><li><code>start-snapshot-id</code> Start snapshot ID used in incremental scans (exclusive).</li><li><code>end-snapshot-id</code> End snapshot ID used in incremental scans (inclusive). This is optional. Omitting it will  [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>()</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;start-snapshot-id&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;10963874102873&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;end-snapshot-id&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;63874143573109&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>  <span style=color:#f92672>.</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path/to/table&#34;</span><span style=color:#f92672>)</span>
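+
+// A sketch with the same hypothetical snapshot ID: end-snapshot-id is optional,
+// and omitting it reads appended data up to the table's current snapshot
+spark.read()
+  .format("iceberg")
+  .option("start-snapshot-id", "10963874102873")
+  .load("path/to/table")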
+</span></span></code></pre></div><div class=info>Currently, incremental reads return only data written by <code>append</code> operations; <code>replace</code>, <code>overwrite</code>, and <code>delete</code> operations are not supported.
+Incremental reads work with both V1 and V2 format versions.
+Incremental reads are not supported through Spark&rsquo;s SQL syntax.</div><h3 id=spark-24>Spark 2.4</h3><p>Spark 2.4 requires using the DataFrame reader with <code>iceberg</code> as a format, because 2.4 does not support direct SQL queries:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#75715e>// named metastore table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;catalog.db.table&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Hadoop path table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/path/to/table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><h4 id=spark-24-with-sql>Spark 2.4 with SQL</h4><p>To run SQL <code>SELECT</code> statements on Iceberg tables in 2.4, register the DataFrame as a temporary table:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> df <span style=color:#66d9ef>=</span> spark<span style=col [...]
+</span></span><span style=display:flex><span>df<span style=color:#f92672>.</span>createOrReplaceTempView<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;table&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>spark<span style=color:#f92672>.</span>sql<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;&#34;&#34;select count(1) from table&#34;&#34;&#34;</span><span style=color:#f92672>).</span>show<span style=color:#f92672>()</span>
+</span></span></code></pre></div><h2 id=inspecting-tables>Inspecting tables</h2><p>To inspect a table&rsquo;s history, snapshots, and other metadata, Iceberg supports metadata tables.</p><p>Metadata tables are identified by adding the metadata table name after the original table name. For example, history for <code>db.table</code> is read using <code>db.table.history</code>.</p><div class=info><p>For Spark 2.4, use the <code>DataFrameReader</code> API to <a href=#inspecting-with-datafram [...]
+</span></span></code></pre></div><table><thead><tr><th>made_current_at</th><th>snapshot_id</th><th>parent_id</th><th>is_current_ancestor</th></tr></thead><tbody><tr><td>2019-02-08 03:29:51.215</td><td>5781947118336215154</td><td>NULL</td><td>true</td></tr><tr><td>2019-02-08 03:47:55.948</td><td>5179299526185056830</td><td>5781947118336215154</td><td>true</td></tr><tr><td>2019-02-09 16:24:30.13</td><td>296410040247533544</td><td>5179299526185056830</td><td>false</td></tr><tr><td>2019-02-0 [...]
+</span></span></code></pre></div><table><thead><tr><th>timestamp</th><th>file</th><th>latest_snapshot_id</th><th>latest_schema_id</th><th>latest_sequence_number</th></tr></thead><tbody><tr><td>2022-07-28 10:43:52.93</td><td>s3://&mldr;/table/metadata/00000-9441e604-b3c2-498a-a45a-6320e8ab9006.metadata.json</td><td>null</td><td>null</td><td>null</td></tr><tr><td>2022-07-28 10:43:57.487</td><td>s3://&mldr;/table/metadata/00001-f30823df-b745-4a0a-b293-7532e0c99986.metadata.json</td><td>1702 [...]
+</span></span></code></pre></div><table><thead><tr><th>committed_at</th><th>snapshot_id</th><th>parent_id</th><th>operation</th><th>manifest_list</th><th>summary</th></tr></thead><tbody><tr><td>2019-02-08 03:29:51.215</td><td>57897183625154</td><td>null</td><td>append</td><td>s3://&mldr;/table/metadata/snap-57897183625154-1.avro</td><td>{ added-records -> 2478404, total-records -> 2478404, added-data-files -> 438, total-data-files -> 438, spark.app.id -> application_1520379288616_155055  [...]
+</span></span><span style=display:flex><span>    h.made_current_at,
+</span></span><span style=display:flex><span>    s.<span style=color:#66d9ef>operation</span>,
+</span></span><span style=display:flex><span>    h.snapshot_id,
+</span></span><span style=display:flex><span>    h.is_current_ancestor,
+</span></span><span style=display:flex><span>    s.summary[<span style=color:#e6db74>&#39;spark.app.id&#39;</span>]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>from</span> prod.db.<span style=color:#66d9ef>table</span>.history h
+</span></span><span style=display:flex><span><span style=color:#66d9ef>join</span> prod.db.<span style=color:#66d9ef>table</span>.snapshots s
+</span></span><span style=display:flex><span>  <span style=color:#66d9ef>on</span> h.snapshot_id <span style=color:#f92672>=</span> s.snapshot_id
+</span></span><span style=display:flex><span><span style=color:#66d9ef>order</span> <span style=color:#66d9ef>by</span> made_current_at
+</span></span></code></pre></div><table><thead><tr><th>made_current_at</th><th>operation</th><th>snapshot_id</th><th>is_current_ancestor</th><th>summary[spark.app.id]</th></tr></thead><tbody><tr><td>2019-02-08 03:29:51.215</td><td>append</td><td>57897183625154</td><td>true</td><td>application_1520379288616_155055</td></tr><tr><td>2019-02-09 16:24:30.13</td><td>delete</td><td>29641004024753</td><td>false</td><td>application_1520379288616_151109</td></tr><tr><td>2019-02-09 16:32:47.336</td [...]
+</span></span></code></pre></div><table><thead><tr><th>content</th><th>file_path</th><th>file_format</th><th>spec_id</th><th>partition</th><th>record_count</th><th>file_size_in_bytes</th><th>column_sizes</th><th>value_counts</th><th>null_value_counts</th><th>nan_value_counts</th><th>lower_bounds</th><th>upper_bounds</th><th>key_metadata</th><th>split_offsets</th><th>equality_ids</th><th>sort_order_id</th></tr></thead><tbody><tr><td>0</td><td>s3:/&mldr;/table/data/00000-3-8d6d60e8-d427-48 [...]
+</span></span></code></pre></div><table><thead><tr><th>path</th><th>length</th><th>partition_spec_id</th><th>added_snapshot_id</th><th>added_data_files_count</th><th>existing_data_files_count</th><th>deleted_data_files_count</th><th>partition_summaries</th></tr></thead><tbody><tr><td>s3://&mldr;/table/metadata/45b5290b-ee61-4788-b324-b1e2735c0e10-m0.avro</td><td>4479</td><td>0</td><td>6668963634911763636</td><td>8</td><td>0</td><td>0</td><td>[[false,null,2019-05-13,2019-05-15]]</td></tr> [...]
+This usually occurs when reading from a V1 table, where <code>contains_nan</code> is not populated.</li></ol><h3 id=partitions>Partitions</h3><p>To show a table&rsquo;s current partitions:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</sp [...]
+</span></span></code></pre></div><table><thead><tr><th>partition</th><th>record_count</th><th>file_count</th><th>spec_id</th></tr></thead><tbody><tr><td>{20211001, 11}</td><td>1</td><td>1</td><td>0</td></tr><tr><td>{20211002, 11}</td><td>1</td><td>1</td><td>0</td></tr><tr><td>{20211001, 10}</td><td>1</td><td>1</td><td>0</td></tr><tr><td>{20211002, 10}</td><td>1</td><td>1</td><td>0</td></tr></tbody></table><p>Note:
+For unpartitioned tables, the partitions table will contain only the record_count and file_count columns.</p><h3 id=all-metadata-tables>All Metadata Tables</h3><p>These tables are unions of the metadata tables specific to the current snapshot, and return metadata across all snapshots.</p><div class=danger>The &ldquo;all&rdquo; metadata tables may produce more than one row per data file or manifest file because metadata files may be part of more than one table snapshot.</div><h4 id=all-da [...]
+</span></span></code></pre></div><table><thead><tr><th>content</th><th>file_path</th><th>file_format</th><th>partition</th><th>record_count</th><th>file_size_in_bytes</th><th>column_sizes</th><th>value_counts</th><th>null_value_counts</th><th>nan_value_counts</th><th>lower_bounds</th><th>upper_bounds</th><th>key_metadata</th><th>split_offsets</th><th>equality_ids</th><th>sort_order_id</th></tr></thead><tbody><tr><td>0</td><td>s3://&mldr;/dt=20210102/00000-0-756e2512-49ae-45bb-aae3-c0ca47 [...]
+</span></span></code></pre></div><table><thead><tr><th>path</th><th>length</th><th>partition_spec_id</th><th>added_snapshot_id</th><th>added_data_files_count</th><th>existing_data_files_count</th><th>deleted_data_files_count</th><th>partition_summaries</th></tr></thead><tbody><tr><td>s3://&mldr;/metadata/a85f78c5-3222-4b37-b7e4-faf944425d48-m0.avro</td><td>6376</td><td>0</td><td>6272782676904868561</td><td>2</td><td>0</td><td>0</td><td>[{false, false, 20210101, 20210101}]</td></tr></tbod [...]
+This usually occurs when reading from a V1 table, where <code>contains_nan</code> is not populated.</li></ol><h3 id=references>References</h3><p>To show a table&rsquo;s known snapshot references:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>F [...]
+</span></span></code></pre></div><table><thead><tr><th>name</th><th>type</th><th>snapshot_id</th><th>max_reference_age_in_ms</th><th>min_snapshots_to_keep</th><th>max_snapshot_age_in_ms</th></tr></thead><tbody><tr><td>main</td><td>BRANCH</td><td>4686954189838128572</td><td>10</td><td>20</td><td>30</td></tr><tr><td>testTag</td><td>TAG</td><td>4686954189838128572</td><td>10</td><td>null</td><td>null</td></tr></tbody></table><h3 id=inspecting-with-dataframes>Inspecting with DataFrames</h3>< [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;db.table.files&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span><span style=color:#75715e>// Hadoop path table
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;hdfs://nn:8020/path/to/table#files&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><h3 id=time-travel-with-metadata-tables>Time Travel with Metadata Tables</h3><p>To inspect a table&rsquo;s metadata with the time travel feature:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#75715e>-- get the table&#39;s file manifests at timestamp Sep 20, 2021 08:00:00
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span>.manifests <span style=color:#66d9ef>TIMESTAMP</span> <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#e6db74>&#39;2021-09-20 08:00:00&#39;</span>;
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#75715e>-- get the table&#39;s partitions with snapshot id 10963874102873L
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>SELECT</span> <span style=color:#f92672>*</span> <span style=color:#66d9ef>FROM</span> prod.db.<span style=color:#66d9ef>table</span>.partitions <span style=color:#66d9ef>VERSION</span> <span style=color:#66d9ef>AS</span> <span style=color:#66d9ef>OF</span> <span style=color:#ae81ff>10963874102873</span>;
+</span></span></code></pre></div><p>Metadata tables can also be inspected with time travel using the DataFrameReader API:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#75715e>// load the table&#39;s file metadata at snapshot-id 10963874102873 as DataFrame
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span>spark<span style=color:#f92672>.</span>read<span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;snapshot-id&#34;</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>10963874102873L</span><span style=color:#f92672>). [...]
+</span></span></code></pre></div></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#querying-with-sql>Querying with SQL</a></li><li><a href=#querying-with-dataframes>Querying with DataFrames</a><ul><li><a href=#catalogs-with-dataframereader>Catalogs with DataFrameReader</a></li><li><a href=#time-travel>Time travel</a></li><li><a href=#incremental-read>Incremental read</a></li><li><a href=#spark-24>Spark 2.4</a></li></ul></li><li><a href=#inspectin [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark-structured-streaming/index.html b/docs/1.1.0/spark-structured-streaming/index.html
new file mode 100644
index 00000000..3af90a9d
--- /dev/null
+++ b/docs/1.1.0/spark-structured-streaming/index.html
@@ -0,0 +1,53 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Structured Streaming</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awes [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a id=active href=../spark-structured-streaming/>Structured Streaming</a></li><li><a href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-structured-streaming>Spark Structured Streaming</ [...]
+with different levels of support in Spark versions.</p><p>As of Spark 3, DataFrame reads and writes are supported.</p><table><thead><tr><th>Feature support</th><th>Spark 3</th><th>Spark 2.4</th><th>Notes</th></tr></thead><tbody><tr><td><a href=#streaming-writes>DataFrame write</a></td><td>✔</td><td>✔</td><td></td></tr></tbody></table><h2 id=streaming-reads>Streaming Reads</h2><p>Iceberg supports processing incremental data in Spark Structured Streaming jobs, starting from a historical [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;stream-from-timestamp&#34;</span><span style=color:#f92672>,</span> <span style=color:#a6e22e>Long</span><span style=color:#f92672>.</span>toString<span style=color:#f92672>(</span>streamStartTimestamp<span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>load<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;database.table_name&#34;</span><span style=color:#f92672>)</span>
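+
+// A sketch against the same hypothetical table: overwrite and delete snapshots
+// fail a stream by default; these options skip them instead
+val dfSkippingRewrites = spark.readStream
+    .format("iceberg")
+    .option("streaming-skip-overwrite-snapshots", "true")
+    .option("streaming-skip-delete-snapshots", "true")
+    .load("database.table_name")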
+</span></span></code></pre></div><div class=warning>Iceberg only supports reading data from append snapshots. Overwrite snapshots cannot be processed and will cause an exception by default. Overwrites may be ignored by setting <code>streaming-skip-overwrite-snapshots=true</code>. Similarly, delete snapshots will cause an exception by default, and deletes may be ignored by setting <code>streaming-skip-delete-snapshots=true</code>.</div><h2 id=streaming-writes>Streaming Writes</h2><p>To wr [...]
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeStream
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>outputMode<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;append&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>trigger<span style=color:#f92672>(</span><span style=color:#a6e22e>Trigger</span><span style=color:#f92672>.</span><span style=color:#a6e22e>ProcessingTime</span><span style=color:#f92672>(</span><span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> <span style=color:#a6e22e>TimeUnit</span><span style=color:#f92672>.</span><span style=color:#a6e22e>MINUTES</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path&#34;</span><span style=color:#f92672>,</span> tableIdentifier<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;checkpointLocation&#34;</span><span style=color:#f92672>,</span> checkpointPath<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>start<span style=color:#f92672>()</span>
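+
+// A sketch, not from the original page: the complete output mode rewrites the
+// table on each micro-batch; aggData is a hypothetical aggregated DataFrame
+aggData.writeStream
+    .format("iceberg")
+    .outputMode("complete")
+    .trigger(Trigger.ProcessingTime(1, TimeUnit.MINUTES))
+    .option("path", tableIdentifier)
+    .option("checkpointLocation", checkpointPath)
+    .start()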
+</span></span></code></pre></div><p>The <code>tableIdentifier</code> can be:</p><ul><li>The fully-qualified path to an HDFS table, like <code>hdfs://nn:8020/path/to/table</code></li><li>A table name if the table is tracked by a catalog, like <code>database.table_name</code></li></ul><p>Iceberg doesn&rsquo;t support &ldquo;continuous processing&rdquo;, as it doesn&rsquo;t provide the interface to &ldquo;commit&rdquo; the output.</p><p>Iceberg supports <code>append</code> and <code>complete [...]
+on Spark page to see how to create the Iceberg table.</p><h3 id=writing-against-partitioned-table>Writing against partitioned table</h3><p>Iceberg requires the data to be sorted according to the partition spec per task (Spark partition) before writing
+against a partitioned table. For batch queries you&rsquo;re encouraged to add an explicit sort to fulfill the requirement
+(see <a href=../spark-writes/#writing-to-partitioned-tables>here</a>), but that approach adds latency, because
+repartitioning and sorting are heavy operations for a streaming workload. To avoid this latency, you can
+enable the fanout writer, which removes the sort requirement.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> tableIdentifier<span style=color:#66d9ef>:</span> <span style=color:#66d9ef>String</span> <span style=color:#f92672>=</span> <span style=color:#f92672>...</span>
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeStream
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>outputMode<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;append&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>trigger<span style=color:#f92672>(</span><span style=color:#a6e22e>Trigger</span><span style=color:#f92672>.</span><span style=color:#a6e22e>ProcessingTime</span><span style=color:#f92672>(</span><span style=color:#ae81ff>1</span><span style=color:#f92672>,</span> <span style=color:#a6e22e>TimeUnit</span><span style=color:#f92672>.</span><span style=color:#a6e22e>MINUTES</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;path&#34;</span><span style=color:#f92672>,</span> tableIdentifier<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;fanout-enabled&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;true&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>option<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;checkpointLocation&#34;</span><span style=color:#f92672>,</span> checkpointPath<span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>start<span style=color:#f92672>()</span>
+</span></span></code></pre></div><p>The fanout writer opens a file per partition value and doesn&rsquo;t close these files until the write task is finished.
+This functionality is discouraged for batch queries, as an explicit sort of the output rows isn&rsquo;t expensive for a batch workload.</p><h2 id=maintenance-for-streaming-tables>Maintenance for streaming tables</h2><p>Streaming queries can create new table versions quickly, which creates lots of table metadata to track those versions.
+Maintaining metadata by tuning the rate of commits, expiring old snapshots, and automatically cleaning up metadata files
+is highly recommended.</p><h3 id=tune-the-rate-of-commits>Tune the rate of commits</h3><p>A high rate of commits produces lots of data files, manifests, and snapshots, which makes the table hard
+to maintain. We encourage a trigger interval of at least 1 minute, increased further if needed.</p><p>The triggers section in the <a href=https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#triggers>Structured Streaming Programming Guide</a>
+documents how to configure the interval.</p><h3 id=expire-old-snapshots>Expire old snapshots</h3><p>Each micro-batch written to a table produces a new snapshot; snapshots are tracked in table metadata until they are expired, which removes the metadata and any data files that are no longer needed. Snapshots accumulate quickly with frequent commits, so it is highly recommended that tables written by streaming queries are <a href=../maintenance#expire-snapshots>regularly maintained</a>.</p><h3 id=co [...]
+This can lead to lots of small manifest files. Manifests can be <a href=../maintenance#rewrite-manifests>rewritten to optimize queries and to compact</a>.</p></div><div id=toc class=markdown-body><div id=full><nav id=TableOfContents><ul><li><a href=#streaming-reads>Streaming Reads</a></li><li><a href=#streaming-writes>Streaming Writes</a><ul><li><a href=#writing-against-partitioned-table>Writing against partitioned table</a></li></ul></li><li><a href=#maintenance-for-streaming-tables>Main [...]
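+<p>A minimal sketch of both steps, using the snapshot-expiration and manifest-rewrite procedures from this release&rsquo;s Spark procedures page (the catalog, table name, and cutoff timestamp are hypothetical):</p>
+<pre><code class=language-sql data-lang=sql>CALL spark_catalog.system.expire_snapshots(table =&gt; &#39;db.tbl&#39;, older_than =&gt; TIMESTAMP &#39;2022-11-01 00:00:00&#39;)
+CALL spark_catalog.system.rewrite_manifests(&#39;db.tbl&#39;)</code></pre>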
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark-writes/index.html b/docs/1.1.0/spark-writes/index.html
new file mode 100644
index 00000000..b904c0f6
--- /dev/null
+++ b/docs/1.1.0/spark-writes/index.html
@@ -0,0 +1,127 @@
+<!doctype html><html><head><meta charset=utf-8><meta http-equiv=x-ua-compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><meta name=description content><meta name=author content><title>Writes</title><link href=../css/bootstrap.css rel=stylesheet><link href=../css/markdown.css rel=stylesheet><link href=../css/katex.min.css rel=stylesheet><link href=../css/iceberg-theme.css rel=stylesheet><link href=../font-awesome-4.7.0/css/font-awesome.min.css re [...]
+<span class=sr-only>Toggle navigation</span>
+<span class=icon-bar></span>
+<span class=icon-bar></span>
+<span class=icon-bar></span></button>
+<a class="page-scroll navbar-brand" href=https://iceberg.apache.org/><img class=top-navbar-logo src=https://iceberg.apache.org/docs/1.1.0//img/iceberg-logo-icon.png> Apache Iceberg</a></div><div><input type=search class=form-control id=search-input placeholder=Search... maxlength=64 data-hotkeys=s/></div><div class=versions-dropdown><span>1.0.0</span> <i class="fa fa-chevron-down"></i><div class=versions-dropdown-content><ul><li class=versions-dropdown-selection><a href=https://iceberg.a [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Tables class=collapse><ul class=sub-menu><li><a href=../configuration/>Configuration</a></li><li><a href=../evolution/>Evolution</a></li><li><a href=../maintenance/>Maintenance</a></li><li><a href=../partitioning/>Partitioning</a></li><li><a href=../performance/>Performance</a></li><li><a href=../reliability/>Reliability</a></li><li><a href=../schemas/>Schemas</a></li></ul></div><li><a class=chevron-toggle data-toggle=collapse data-paren [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Spark class="collapse in"><ul class=sub-menu><li><a href=../spark-ddl/>DDL</a></li><li><a href=../getting-started/>Getting Started</a></li><li><a href=../spark-procedures/>Procedures</a></li><li><a href=../spark-queries/>Queries</a></li><li><a href=../spark-structured-streaming/>Structured Streaming</a></li><li><a id=active href=../spark-writes/>Writes</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-p [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Flink class=collapse><ul class=sub-menu><li><a href=../flink/>Enabling Iceberg in Flink</a></li><li><a href=../flink-connector/>Flink Connector</a></li></ul></div><li><a href=../hive/><span>Hive</span></a></li><li><a target=_blank href=https://trino.io/docs/current/connector/iceberg.html><span>Trino</span></a></li><li><a target=_blank href=https://prestodb.io/docs/current/connector/iceberg.html><span>Presto</span></a></li><li><a target=_ [...]
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=Integrations class=collapse><ul class=sub-menu><li><a href=../aws/>AWS</a></li><li><a href=../dell/>Dell</a></li><li><a href=../jdbc/>JDBC</a></li><li><a href=../nessie/>Nessie</a></li></ul></div><li><a class="chevron-toggle collapsed" data-toggle=collapse data-parent=full href=#API><span>API</span>
+<i class="fa fa-chevron-right"></i>
+<i class="fa fa-chevron-down"></i></a></li><div id=API class=collapse><ul class=sub-menu><li><a href=../java-api-quickstart/>Java Quickstart</a></li><li><a href=../api/>Java API</a></li><li><a href=../custom-catalog/>Java Custom Catalog</a></li></ul></div><li><a href=https://iceberg.apache.org/docs/1.1.0/../../javadoc/latest><span>Javadoc</span></a></li></div></div><div id=content class=markdown-body><div class=margin-for-toc><h1 id=spark-writes>Spark Writes</h1><p>To use Iceberg in Spar [...]
+</span></span></code></pre></div><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=color:#66d9ef>INTO</span> prod.db.<span style=color:#66d9ef>table</span> <span style=color:#66d9ef>SELECT</span> ...
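+
+-- A sketch with hypothetical values; INSERT INTO also accepts a VALUES list
+INSERT INTO prod.db.table VALUES (1, 'a'), (2, 'b')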
+</span></span></code></pre></div><h3 id=merge-into><code>MERGE INTO</code></h3><p>Spark 3 added support for <code>MERGE INTO</code> queries that can express row-level updates.</p><p>Iceberg supports <code>MERGE INTO</code> by rewriting data files that contain rows that need to be updated in an <code>overwrite</code> commit.</p><p><strong><code>MERGE INTO</code> is recommended instead of <code>INSERT OVERWRITE</code></strong> because Iceberg can replace only the affected data files, and b [...]
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>USING</span> (<span style=color:#66d9ef>SELECT</span> ...) s          <span style=color:#75715e>-- the source updates
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>ON</span> t.id <span style=color:#f92672>=</span> s.id                <span style=color:#75715e>-- condition to find updates for target rows
+</span></span></span><span style=display:flex><span><span style=color:#75715e></span><span style=color:#66d9ef>WHEN</span> ...                      <span style=color:#75715e>-- updates
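+
+-- A complete sketch using a hypothetical source table and the id/op/count
+-- columns that appear in the clauses below
+MERGE INTO prod.db.target t
+USING (SELECT * FROM prod.db.source) s
+ON t.id = s.id
+WHEN MATCHED AND s.op = 'delete' THEN DELETE
+WHEN MATCHED THEN UPDATE SET t.count = t.count + 1
+WHEN NOT MATCHED THEN INSERT *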
+</span></span></span></code></pre></div><p>Updates to rows in the target table are listed using <code>WHEN MATCHED ... THEN ...</code>. Multiple <code>MATCHED</code> clauses can be added with conditions that determine when each match should be applied. The first matching expression is used.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><spa [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> MATCHED <span style=color:#66d9ef>AND</span> t.<span style=color:#66d9ef>count</span> <span style=color:#66d9ef>IS</span> <span style=color:#66d9ef>NULL</span> <span style=color:#66d9ef>AND</span> s.op <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;increment&#39;</span> <span style=color:#66d9ef>THEN</span> <span style=color:#66d9ef>UPDATE</span> <span style=color:#66d9ef>SET</span> t.<s [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> MATCHED <span style=color:#66d9ef>AND</span> s.op <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;increment&#39;</span> <span style=color:#66d9ef>THEN</span> <span style=color:#66d9ef>UPDATE</span> <span style=color:#66d9ef>SET</span> t.<span style=color:#66d9ef>count</span> <span style=color:#f92672>=</span> t.<span style=color:#66d9ef>count</span> <span style=color:#f92672>+</span> <spa [...]
+</span></span></code></pre></div><p>Source rows (updates) that do not match can be inserted:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> <span style=color:#66d9ef>NOT</span> MATCHED <span style=color:#66d9ef>THEN</span> <span style=color:#66d9ef>INSERT</span> <span style=color:#f92672>*</span>
+</span></span></code></pre></div><p>Inserts also support additional conditions:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>WHEN</span> <span style=color:#66d9ef>NOT</span> MATCHED <span style=color:#66d9ef>AND</span> s.event_time <span style=color:#f92672>&gt;</span> still_valid_threshold <span style=color:#66d9 [...]
+</span></span></code></pre></div><p>Only one record in the source data can update any given row of the target table, or else an error will be thrown.</p><h3 id=insert-overwrite><code>INSERT OVERWRITE</code></h3><p><code>INSERT OVERWRITE</code> can replace data in the table with the result of a query. Overwrites are atomic operations for Iceberg tables.</p><p>The partitions that will be replaced by <code>INSERT OVERWRITE</code> depend on Spark&rsquo;s partition overwrite mode and the par [...]
+</span></span><span style=display:flex><span>    uuid string <span style=color:#66d9ef>NOT</span> <span style=color:#66d9ef>NULL</span>,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>level</span> string <span style=color:#66d9ef>NOT</span> <span style=color:#66d9ef>NULL</span>,
+</span></span><span style=display:flex><span>    ts <span style=color:#66d9ef>timestamp</span> <span style=color:#66d9ef>NOT</span> <span style=color:#66d9ef>NULL</span>,
+</span></span><span style=display:flex><span>    message string)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (<span style=color:#66d9ef>level</span>, hours(ts))
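+
+-- A sketch: one way to choose Spark's partition overwrite mode for the
+-- session (Spark's default is static)
+SET spark.sql.sources.partitionOverwriteMode=dynamic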
+</span></span></code></pre></div><h4 id=dynamic-overwrite>Dynamic overwrite</h4><p>When Spark&rsquo;s overwrite mode is dynamic, partitions that have rows produced by the <code>SELECT</code> query will be replaced.</p><p>For example, this query removes duplicate log events from the example <code>logs</code> table.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> uuid, <span style=color:#66d9ef>first</span>(<span style=color:#66d9ef>level</span>), <span style=color:#66d9ef>first</span>(ts), <span style=color:#66d9ef>first</span>(message)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>FROM</span> prod.my_app.logs
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> <span style=color:#66d9ef>cast</span>(ts <span style=color:#66d9ef>as</span> date) <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;2020-07-01&#39;</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>GROUP</span> <span style=color:#66d9ef>BY</span> uuid
+</span></span></code></pre></div><p>In dynamic mode, this will replace any partition that has rows in the <code>SELECT</code> result. Because the date of all rows is restricted to 1 July, only hours of that day will be replaced.</p><h4 id=static-overwrite>Static overwrite</h4><p>When Spark&rsquo;s overwrite mode is static, the <code>PARTITION</code> clause is converted to a filter that is used to delete from the table. If the <code>PARTITION</code> clause is omitted, all partitions will be r [...]
+</span></span><span style=display:flex><span>PARTITION (<span style=color:#66d9ef>level</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;INFO&#39;</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> uuid, <span style=color:#66d9ef>first</span>(<span style=color:#66d9ef>level</span>), <span style=color:#66d9ef>first</span>(ts), <span style=color:#66d9ef>first</span>(message)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>FROM</span> prod.my_app.logs
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> <span style=color:#66d9ef>level</span> <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;INFO&#39;</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>GROUP</span> <span style=color:#66d9ef>BY</span> uuid
+</span></span></code></pre></div><p>Note that this mode cannot replace hourly partitions like the dynamic example query because the <code>PARTITION</code> clause can only reference table columns, not hidden partitions.</p><h3 id=delete-from><code>DELETE FROM</code></h3><p>Spark 3 added support for <code>DELETE FROM</code> queries to remove data from tables.</p><p>Delete queries accept a filter to match rows to delete.</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> ts <span style=color:#f92672>&gt;=</span> <span style=color:#e6db74>&#39;2020-05-01 00:00:00&#39;</span> <span style=color:#66d9ef>and</span> ts <span style=color:#f92672>&lt;</span> <span style=color:#e6db74>&#39;2020-06-01 00:00:00&#39;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>DELETE</span> <span style=color:#66d9ef>FROM</span> prod.db.all_events
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> session_time <span style=color:#f92672>&lt;</span> (<span style=color:#66d9ef>SELECT</span> <span style=color:#66d9ef>min</span>(session_time) <span style=color:#66d9ef>FROM</span> prod.db.good_events)
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>DELETE</span> <span style=color:#66d9ef>FROM</span> prod.db.orders <span style=color:#66d9ef>AS</span> t1
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> <span style=color:#66d9ef>EXISTS</span> (<span style=color:#66d9ef>SELECT</span> oid <span style=color:#66d9ef>FROM</span> prod.db.returned_orders <span style=color:#66d9ef>WHERE</span> t1.oid <span style=color:#f92672>=</span> oid)
+</span></span></code></pre></div><p>If the delete filter matches entire partitions of the table, Iceberg will perform a metadata-only delete. If the filter matches individual rows of a table, then Iceberg will rewrite only the affected data files.</p><h3 id=update><code>UPDATE</code></h3><p>Spark 3.1 added support for <code>UPDATE</code> queries that update matching rows in tables.</p><p>Update queries accept a filter to match rows to update.</p><div class=highlight><pre tabindex=0 style [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> c1 <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;update_c1&#39;</span>, c2 <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;update_c2&#39;</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> ts <span style=color:#f92672>&gt;=</span> <span style=color:#e6db74>&#39;2020-05-01 00:00:00&#39;</span> <span style=color:#66d9ef>and</span> ts <span style=color:#f92672>&lt;</span> <span style=color:#e6db74>&#39;2020-06-01 00:00:00&#39;</span>
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>UPDATE</span> prod.db.all_events
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> session_time <span style=color:#f92672>=</span> <span style=color:#ae81ff>0</span>, ignored <span style=color:#f92672>=</span> <span style=color:#66d9ef>true</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> session_time <span style=color:#f92672>&lt;</span> (<span style=color:#66d9ef>SELECT</span> <span style=color:#66d9ef>min</span>(session_time) <span style=color:#66d9ef>FROM</span> prod.db.good_events)
+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>UPDATE</span> prod.db.orders <span style=color:#66d9ef>AS</span> t1
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SET</span> order_status <span style=color:#f92672>=</span> <span style=color:#e6db74>&#39;returned&#39;</span>
+</span></span><span style=display:flex><span><span style=color:#66d9ef>WHERE</span> <span style=color:#66d9ef>EXISTS</span> (<span style=color:#66d9ef>SELECT</span> oid <span style=color:#66d9ef>FROM</span> prod.db.returned_orders <span style=color:#66d9ef>WHERE</span> t1.oid <span style=color:#f92672>=</span> oid)
+</span></span></code></pre></div><p>For more complex row-level updates based on incoming data, see the section on <code>MERGE INTO</code>.</p><h2 id=writing-with-dataframes>Writing with DataFrames</h2><p>Spark 3 introduced the new <code>DataFrameWriterV2</code> API for writing to tables using data frames. The v2 API is recommended for several reasons:</p><ul><li>CTAS, RTAS, and overwrite by filter are supported</li><li>All operations consistently write columns to a table by name</li><li> [...]
+Using <code>format("iceberg")</code> loads an isolated table reference that will not automatically refresh tables used by queries.</div><h3 id=appending-data>Appending data</h3><p>To append a dataframe to an Iceberg table, use <code>append</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> data<sp [...]
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.table&#34;</span><span style=color:#f92672>).</span>append<span style=color:#f92672>()</span>
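+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// append() adds the new rows in a single atomic snapshot commit, leaving existing data untouched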
+</span></span></code></pre></div><h4 id=spark-24>Spark 2.4</h4><p>In Spark 2.4, use the v1 API with <code>append</code> mode and <code>iceberg</code> format:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>write
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>mode<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;append&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>save<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;db.table&#34;</span><span style=color:#f92672>)</span>
+</span></span></code></pre></div><h3 id=overwriting-data>Overwriting data</h3><p>To overwrite partitions dynamically, use <code>overwritePartitions()</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> data<span style=color:#66d9ef>:</span> <span style=color:#66d9ef>DataFrame</span> <span style=col [...]
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.table&#34;</span><span style=color:#f92672>).</span>overwritePartitions<span style=color:#f92672>()</span>
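+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// only the partitions that receive rows from `data` are replaced; all others are left untouched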
+</span></span></code></pre></div><p>To explicitly overwrite partitions, use <code>overwrite</code> to supply a filter:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.table&#34;</span><span style=color:#f92672>).</span>ove [...]
+</span></span></code></pre></div><h4 id=spark-24-1>Spark 2.4</h4><p>In Spark 2.4, overwrite values in an Iceberg table with <code>overwrite</code> mode and <code>iceberg</code> format:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>write
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>format<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>mode<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;overwrite&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>save<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;db.table&#34;</span><span style=color:#f92672>)</span>
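+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// caution: under Spark 3 this v1 overwrite replaces the entire table by default (see the note below)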
+</span></span></code></pre></div><div class=danger><strong>The behavior of overwrite mode changed between Spark 2.4 and Spark 3</strong>.</div><p>The behavior of the DataFrameWriter overwrite mode was undefined in Spark 2.4, but in Spark 3 it is required to overwrite the entire table. Because of this new requirement, the Iceberg source&rsquo;s behavior changed in Spark 3. In Spark 2.4, the behavior was to dynamically overwrite partitions. To use the Spark 2.4 behavior, add option <code>overwrit [...]
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.table&#34;</span><span style=color:#f92672>).</span>create<span style=color:#f92672>()</span>
+</span></span></code></pre></div><p>If you have replaced the default Spark catalog (<code>spark_catalog</code>) with Iceberg&rsquo;s <code>SparkSessionCatalog</code>, do:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>val</span> data<span style=color:#66d9ef>:</span> <span style=color:#66d9ef>DataFrame</span> <s [...]
+</span></span><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;db.table&#34;</span><span style=color:#f92672>).</span>using<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg&#34;</span><span style=color:#f92672>).</span>create<span style=color:#f92672>()</span>
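+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// using("iceberg") is needed because SparkSessionCatalog also handles non-Iceberg
+</span></span><span style=display:flex><span>// tables, so the table format must be specified explicitly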
+</span></span></code></pre></div><p>Create and replace operations support table configuration methods, like <code>partitionedBy</code> and <code>tableProperty</code>:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.table&# [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>tableProperty<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;write.format.default&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;orc&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>partitionedBy<span style=color:#f92672>(</span>$<span style=color:#e6db74>&#34;level&#34;</span><span style=color:#f92672>,</span> days<span style=color:#f92672>(</span>$<span style=color:#e6db74>&#34;ts&#34;</span><span style=color:#f92672>))</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>createOrReplace<span style=color:#f92672>()</span>
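+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// createOrReplace() runs a CTAS if the table is missing, or an RTAS that replaces
+</span></span><span style=display:flex><span>// its contents in one commit if it already exists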
+</span></span></code></pre></div><h2 id=writing-to-partitioned-tables>Writing to partitioned tables</h2><p>Iceberg requires the data in each task (Spark partition) to be sorted according to the partition spec before writing
+to a partitioned table. This applies to both writing with SQL and writing with DataFrames.</p><div class=info>An explicit sort is necessary because, as of Spark 3.0, Spark doesn&rsquo;t allow Iceberg to request a sort before writing.
+<a href=https://issues.apache.org/jira/browse/SPARK-23889>SPARK-23889</a> is filed to enable Iceberg to require a specific
+distribution and sort order from Spark.</div><div class=info>Both a global sort (<code>orderBy</code>/<code>sort</code>) and a local sort (<code>sortWithinPartitions</code>) satisfy the requirement.</div><p>Let&rsquo;s go through writing data to the sample table below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>CR [...]
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string,
+</span></span><span style=display:flex><span>    ts <span style=color:#66d9ef>timestamp</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (days(ts), category)
+</span></span></code></pre></div><p>To write data to the sample table, your data needs to be sorted by <code>days(ts), category</code>.</p><p>If you&rsquo;re inserting data with a SQL statement, you can use <code>ORDER BY</code> to achieve this, like below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <s [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span>, category, ts <span style=color:#66d9ef>FROM</span> another_table
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ORDER</span> <span style=color:#66d9ef>BY</span> ts, category
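+</span></span><span style=display:flex><span>-- sorting by the source column ts also clusters rows by the derived partition days(ts)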
+</span></span></code></pre></div><p>If you&rsquo;re inserting data with a DataFrame, you can use either <code>orderBy</code>/<code>sort</code> to trigger a global sort, or <code>sortWithinPartitions</code>
+to trigger a local sort. For example, using a local sort:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>sortWithinPartitions<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;ts&#34;</span><span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;category&#34;</span><span style=color:#f92 [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.sample&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>append<span style=color:#f92672>()</span>
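+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// sortWithinPartitions sorts each task's data in place, avoiding the extra
+</span></span><span style=display:flex><span>// shuffle that a global orderBy/sort would trigger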
+</span></span></code></pre></div><p>For most partition transformations, you can simply add the source column to the sort condition; the exception is <code>bucket</code>.</p><p>For the <code>bucket</code> partition transformation, you need to register the Iceberg transform function in Spark so that it can be used in the sort.</p><p>Let&rsquo;s go through another sample table with a bucket partition:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab- [...]
+</span></span><span style=display:flex><span>    id bigint,
+</span></span><span style=display:flex><span>    <span style=color:#66d9ef>data</span> string,
+</span></span><span style=display:flex><span>    category string,
+</span></span><span style=display:flex><span>    ts <span style=color:#66d9ef>timestamp</span>)
+</span></span><span style=display:flex><span><span style=color:#66d9ef>USING</span> iceberg
+</span></span><span style=display:flex><span>PARTITIONED <span style=color:#66d9ef>BY</span> (bucket(<span style=color:#ae81ff>16</span>, id))
+</span></span></code></pre></div><p>You need to register a function to handle the bucket transform, like below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span><span style=color:#66d9ef>import</span> org.apache.iceberg.spark.IcebergSpark
+</span></span><span style=display:flex><span><span style=color:#66d9ef>import</span> org.apache.spark.sql.types.DataTypes
+</span></span><span style=display:flex><span>
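+</span></span><span style=display:flex><span>// register a UDF named "iceberg_bucket16" that applies Iceberg's bucket(16)
+</span></span><span style=display:flex><span>// transform to a long column, so the bucketing can be expressed in a sort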
+</span></span><span style=display:flex><span><span style=color:#a6e22e>IcebergSpark</span><span style=color:#f92672>.</span>registerBucketUDF<span style=color:#f92672>(</span>spark<span style=color:#f92672>,</span> <span style=color:#e6db74>&#34;iceberg_bucket16&#34;</span><span style=color:#f92672>,</span> <span style=color:#a6e22e>DataTypes</span><span style=color:#f92672>.</span><span style=color:#a6e22e>LongType</span><span style=color:#f92672>,</span> <span style=color:#ae81ff>16</s [...]
+</span></span></code></pre></div><div class=info>Explicit registration of the function is necessary because Spark doesn&rsquo;t allow Iceberg to provide functions.
+<a href=https://issues.apache.org/jira/browse/SPARK-27658>SPARK-27658</a> is filed to enable Iceberg to provide functions
+which can be used in queries.</div><p>Here we registered the bucket function as <code>iceberg_bucket16</code>, which can be used in the sort clause.</p><p>If you&rsquo;re inserting data with a SQL statement, you can use the function like below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-sql data-lang=sql><span style=display:flex><span><span style=color:#66d9ef>INSERT</span> <span style=co [...]
+</span></span><span style=display:flex><span><span style=color:#66d9ef>SELECT</span> id, <span style=color:#66d9ef>data</span>, category, ts <span style=color:#66d9ef>FROM</span> another_table
+</span></span><span style=display:flex><span><span style=color:#66d9ef>ORDER</span> <span style=color:#66d9ef>BY</span> iceberg_bucket16(id)
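+</span></span><span style=display:flex><span>-- rows are now clustered by the same bucket values Iceberg uses for partitioning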
+</span></span></code></pre></div><p>If you&rsquo;re inserting data with a DataFrame, you can use the function like below:</p><div class=highlight><pre tabindex=0 style=color:#f8f8f2;background-color:#272822;-moz-tab-size:4;-o-tab-size:4;tab-size:4><code class=language-scala data-lang=scala><span style=display:flex><span>data<span style=color:#f92672>.</span>sortWithinPartitions<span style=color:#f92672>(</span>expr<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;iceberg_buc [...]
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>writeTo<span style=color:#f92672>(</span><span style=color:#e6db74>&#34;prod.db.sample&#34;</span><span style=color:#f92672>)</span>
+</span></span><span style=display:flex><span>    <span style=color:#f92672>.</span>append<span style=color:#f92672>()</span>
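+</span></span><span style=display:flex><span>
+</span></span><span style=display:flex><span>// expr comes from org.apache.spark.sql.functions and lets the registered UDF
+</span></span><span style=display:flex><span>// be referenced by name inside the sort expression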
+</span></span></code></pre></div><h2 id=type-compatibility>Type compatibility</h2><p>Spark and Iceberg support different sets of types. Iceberg does the type conversion automatically, but not for all combinations,
+so you may want to understand the type conversions in Iceberg before designing the column types of your tables.</p><h3 id=spark-type-to-iceberg-type>Spark type to Iceberg type</h3><p>This type conversion table describes how Spark types are converted to Iceberg types. The conversion applies both when creating an Iceberg table and when writing to an Iceberg table via Spark.</p><table><thead><tr><th>Spark</th><th>Iceberg</th><th>Notes</th></tr></thead><tbody><tr><td>boolean</td><td>boolean</td> [...]
+<script src=https://iceberg.apache.org/docs/1.1.0//js/jquery.easing.min.js></script>
+<script type=text/javascript src=https://iceberg.apache.org/docs/1.1.0//js/search.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/bootstrap.min.js></script>
+<script src=https://iceberg.apache.org/docs/1.1.0//js/iceberg-theme.js></script></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/getting-started/index.html b/docs/1.1.0/spark/getting-started/index.html
new file mode 100644
index 00000000..75a65379
--- /dev/null
+++ b/docs/1.1.0/spark/getting-started/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/getting-started/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/getting-started/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/getting-started/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-configuration/index.html b/docs/1.1.0/spark/spark-configuration/index.html
new file mode 100644
index 00000000..48aadc0f
--- /dev/null
+++ b/docs/1.1.0/spark/spark-configuration/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-configuration/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-configuration/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-configuration/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-ddl/index.html b/docs/1.1.0/spark/spark-ddl/index.html
new file mode 100644
index 00000000..0ff6dac1
--- /dev/null
+++ b/docs/1.1.0/spark/spark-ddl/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-ddl/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-ddl/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-ddl/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-procedures/index.html b/docs/1.1.0/spark/spark-procedures/index.html
new file mode 100644
index 00000000..9fb0a350
--- /dev/null
+++ b/docs/1.1.0/spark/spark-procedures/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-procedures/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-procedures/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-procedures/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-queries/index.html b/docs/1.1.0/spark/spark-queries/index.html
new file mode 100644
index 00000000..d6e5f480
--- /dev/null
+++ b/docs/1.1.0/spark/spark-queries/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-queries/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-queries/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-queries/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-structured-streaming/index.html b/docs/1.1.0/spark/spark-structured-streaming/index.html
new file mode 100644
index 00000000..6b4f7886
--- /dev/null
+++ b/docs/1.1.0/spark/spark-structured-streaming/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-structured-streaming/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-structured-streaming/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-structured-streaming/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/spark/spark-writes/index.html b/docs/1.1.0/spark/spark-writes/index.html
new file mode 100644
index 00000000..6b95b2bb
--- /dev/null
+++ b/docs/1.1.0/spark/spark-writes/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/spark-writes/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/spark-writes/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/spark-writes/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/configuration/index.html b/docs/1.1.0/tables/configuration/index.html
new file mode 100644
index 00000000..c4e27891
--- /dev/null
+++ b/docs/1.1.0/tables/configuration/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/configuration/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/configuration/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/configuration/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/evolution/index.html b/docs/1.1.0/tables/evolution/index.html
new file mode 100644
index 00000000..2a62ec15
--- /dev/null
+++ b/docs/1.1.0/tables/evolution/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/evolution/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/evolution/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/evolution/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/maintenance/index.html b/docs/1.1.0/tables/maintenance/index.html
new file mode 100644
index 00000000..22d43b88
--- /dev/null
+++ b/docs/1.1.0/tables/maintenance/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/maintenance/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/maintenance/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/maintenance/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/partitioning/index.html b/docs/1.1.0/tables/partitioning/index.html
new file mode 100644
index 00000000..059c2a01
--- /dev/null
+++ b/docs/1.1.0/tables/partitioning/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/partitioning/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/partitioning/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/partitioning/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/performance/index.html b/docs/1.1.0/tables/performance/index.html
new file mode 100644
index 00000000..8143f218
--- /dev/null
+++ b/docs/1.1.0/tables/performance/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/performance/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/performance/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/performance/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/reliability/index.html b/docs/1.1.0/tables/reliability/index.html
new file mode 100644
index 00000000..3c9d5989
--- /dev/null
+++ b/docs/1.1.0/tables/reliability/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/reliability/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/reliability/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/reliability/"></head></html>
\ No newline at end of file
diff --git a/docs/1.1.0/tables/schemas/index.html b/docs/1.1.0/tables/schemas/index.html
new file mode 100644
index 00000000..88287a85
--- /dev/null
+++ b/docs/1.1.0/tables/schemas/index.html
@@ -0,0 +1 @@
+<!doctype html><html lang=en-us><head><title>https://iceberg.apache.org/docs/1.1.0/schemas/</title><link rel=canonical href=https://iceberg.apache.org/docs/1.1.0/schemas/><meta name=robots content="noindex"><meta charset=utf-8><meta http-equiv=refresh content="0; url=https://iceberg.apache.org/docs/1.1.0/schemas/"></head></html>
\ No newline at end of file