Posted to commits@iceberg.apache.org by ru...@apache.org on 2022/02/08 18:07:22 UTC

[iceberg-docs] 03/47: Add landing page site and versioned docs structure (#2)

This is an automated email from the ASF dual-hosted git repository.

russellspitzer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg-docs.git

commit 59b01757e67b185333ae0eff90df4f1a918dad1f
Author: Samuel Redai <43...@users.noreply.github.com>
AuthorDate: Tue Jan 4 09:16:08 2022 -0800

    Add landing page site and versioned docs structure (#2)
---
 .github/workflows/deploy.yml                       |   95 +
 .gitignore                                         |   11 +
 .gitmodules                                        |    3 +
 README.md                                          |   94 +-
 docs/.hugo_build.lock                              |    0
 docs/_default2/list.html                           |    0
 docs/_default2/single.html                         |    0
 docs/archetypes/default.md                         |    6 +
 docs/assets/_custom.scss                           |   78 +
 docs/assets/search.js                              |  116 +
 docs/config.toml                                   |   11 +
 docs/content/_index.md                             |   54 +
 docs/content/docs/api/_index.md                    |    5 +
 docs/content/docs/api/java-api-quickstart.md       |  197 +
 docs/content/docs/api/java-api.md                  |  259 +
 docs/content/docs/api/java-custom-catalog.md       |  272 +
 docs/content/docs/api/python-api-intro.md          |  180 +
 docs/content/docs/api/python-feature-support.md    |   76 +
 docs/content/docs/api/python-quickstart.md         |   67 +
 docs/content/docs/asf/_index.md                    |    6 +
 docs/content/docs/asf/donate/_index.md             |   22 +
 docs/content/docs/asf/events/_index.md             |   22 +
 docs/content/docs/asf/license/_index.md            |   22 +
 docs/content/docs/asf/security/_index.md           |   22 +
 docs/content/docs/asf/sponsors/_index.md           |   22 +
 docs/content/docs/community/_index.md              |    5 +
 docs/content/docs/community/blogs.md               |    4 +
 docs/content/docs/community/join.md                |    4 +
 docs/content/docs/community/talks.md               |    4 +
 docs/content/docs/flink/_index.md                  |    5 +
 docs/content/docs/flink/flink-connector.md         |  145 +
 docs/content/docs/flink/flink-getting-started.md   |  557 ++
 docs/content/docs/format/_index.md                 |    5 +
 docs/content/docs/format/spec.md                   |    4 +
 docs/content/docs/format/terms.md                  |    4 +
 docs/content/docs/hive/_index.md                   |  350 ++
 docs/content/docs/integrations/_index.md           |    5 +
 docs/content/docs/integrations/aws.md              |  494 ++
 docs/content/docs/integrations/jdbc.md             |   71 +
 docs/content/docs/integrations/nessie.md           |  161 +
 docs/content/docs/prestodb/_index.md               |   23 +
 docs/content/docs/project/_index.md                |    5 +
 docs/content/docs/project/benchmarks.md            |    5 +
 docs/content/docs/project/how-to-release.md        |    4 +
 docs/content/docs/project/roadmap.md               |    4 +
 docs/content/docs/project/security.md              |    4 +
 docs/content/docs/project/trademarks.md            |    4 +
 docs/content/docs/releases/0.12.0/_index.md        |    5 +
 docs/content/docs/releases/0.12.1/_index.md        |    5 +
 docs/content/docs/releases/_index.md               |    5 +
 docs/content/docs/releases/latest/_index.md        |    5 +
 docs/content/docs/releases/release-notes.md        |    4 +
 docs/content/docs/spark/_index.md                  |    5 +
 docs/content/docs/spark/spark-configuration.md     |  186 +
 docs/content/docs/spark/spark-ddl.md               |  343 ++
 docs/content/docs/spark/spark-getting-started.md   |  144 +
 docs/content/docs/spark/spark-procedures.md        |  458 ++
 docs/content/docs/spark/spark-queries.md           |  265 +
 .../docs/spark/spark-structured-streaming.md       |  113 +
 docs/content/docs/spark/spark-writes.md            |  457 ++
 docs/content/docs/tables/_index.md                 |    5 +
 docs/content/docs/tables/configuration.md          |  131 +
 docs/content/docs/tables/evolution.md              |  103 +
 docs/content/docs/tables/maintenance.md            |  149 +
 docs/content/docs/tables/partitioning.md           |   97 +
 docs/content/docs/tables/performance.md            |   57 +
 docs/content/docs/tables/reliability.md            |   70 +
 docs/content/docs/tables/schemas.md                |   46 +
 docs/content/docs/trino/_index.md                  |   23 +
 docs/layouts/partials/docs/brand.html              |   11 +
 docs/layouts/partials/docs/header.html             |   14 +
 docs/layouts/partials/docs/menu-filetree.html      |  124 +
 docs/layouts/partials/docs/search.html             |   13 +
 docs/layouts/shortcodes/icebergVersion.html        |    1 +
 docs/layouts/shortcodes/nessieVersion.html         |    1 +
 docs/static/css/bootstrap.css                      | 6199 ++++++++++++++++++++
 docs/static/css/pricing.css                        |  133 +
 docs/static/favicon-16x16.png                      |  Bin 0 -> 1496 bytes
 docs/static/favicon-32x32.png                      |  Bin 0 -> 2101 bytes
 docs/static/favicon-96x96.png                      |  Bin 0 -> 5456 bytes
 docs/static/favicon.ico                            |  Bin 0 -> 1150 bytes
 docs/static/favicon.png                            |  Bin 0 -> 5456 bytes
 docs/static/flexsearch.min.js                      |    0
 docs/static/font-awesome-4.7.0/HELP-US-OUT.txt     |    7 +
 .../static/font-awesome-4.7.0/css/font-awesome.css | 2337 ++++++++
 .../font-awesome-4.7.0/css/font-awesome.min.css    |    4 +
 .../font-awesome-4.7.0/fonts/FontAwesome.otf       |  Bin 0 -> 134808 bytes
 .../fonts/fontawesome-webfont.eot                  |  Bin 0 -> 165742 bytes
 .../fonts/fontawesome-webfont.svg                  | 2671 +++++++++
 .../fonts/fontawesome-webfont.ttf                  |  Bin 0 -> 165548 bytes
 .../fonts/fontawesome-webfont.woff                 |  Bin 0 -> 98024 bytes
 .../fonts/fontawesome-webfont.woff2                |  Bin 0 -> 77160 bytes
 docs/static/font-awesome-4.7.0/less/animated.less  |   34 +
 .../font-awesome-4.7.0/less/bordered-pulled.less   |   25 +
 docs/static/font-awesome-4.7.0/less/core.less      |   12 +
 .../font-awesome-4.7.0/less/fixed-width.less       |    6 +
 .../font-awesome-4.7.0/less/font-awesome.less      |   18 +
 docs/static/font-awesome-4.7.0/less/icons.less     |  789 +++
 docs/static/font-awesome-4.7.0/less/larger.less    |   13 +
 docs/static/font-awesome-4.7.0/less/list.less      |   19 +
 docs/static/font-awesome-4.7.0/less/mixins.less    |   60 +
 docs/static/font-awesome-4.7.0/less/path.less      |   15 +
 .../font-awesome-4.7.0/less/rotated-flipped.less   |   20 +
 .../font-awesome-4.7.0/less/screen-reader.less     |    5 +
 docs/static/font-awesome-4.7.0/less/stacked.less   |   20 +
 docs/static/font-awesome-4.7.0/less/variables.less |  800 +++
 docs/static/font-awesome-4.7.0/scss/_animated.scss |   34 +
 .../font-awesome-4.7.0/scss/_bordered-pulled.scss  |   25 +
 docs/static/font-awesome-4.7.0/scss/_core.scss     |   12 +
 .../font-awesome-4.7.0/scss/_fixed-width.scss      |    6 +
 docs/static/font-awesome-4.7.0/scss/_icons.scss    |  789 +++
 docs/static/font-awesome-4.7.0/scss/_larger.scss   |   13 +
 docs/static/font-awesome-4.7.0/scss/_list.scss     |   19 +
 docs/static/font-awesome-4.7.0/scss/_mixins.scss   |   60 +
 docs/static/font-awesome-4.7.0/scss/_path.scss     |   15 +
 .../font-awesome-4.7.0/scss/_rotated-flipped.scss  |   20 +
 .../font-awesome-4.7.0/scss/_screen-reader.scss    |    5 +
 docs/static/font-awesome-4.7.0/scss/_stacked.scss  |   20 +
 .../static/font-awesome-4.7.0/scss/_variables.scss |  800 +++
 .../font-awesome-4.7.0/scss/font-awesome.scss      |   18 +
 docs/static/fontawesome/HELP-US-OUT.txt            |    7 +
 docs/static/fontawesome/css/font-awesome.css       | 2337 ++++++++
 docs/static/fontawesome/css/font-awesome.min.css   |    4 +
 docs/static/fontawesome/fonts/FontAwesome.otf      |  Bin 0 -> 134808 bytes
 .../fontawesome/fonts/fontawesome-webfont.eot      |  Bin 0 -> 165742 bytes
 .../fontawesome/fonts/fontawesome-webfont.svg      | 2671 +++++++++
 .../fontawesome/fonts/fontawesome-webfont.ttf      |  Bin 0 -> 165548 bytes
 .../fontawesome/fonts/fontawesome-webfont.woff     |  Bin 0 -> 98024 bytes
 .../fontawesome/fonts/fontawesome-webfont.woff2    |  Bin 0 -> 77160 bytes
 docs/static/fontawesome/less/animated.less         |   34 +
 docs/static/fontawesome/less/bordered-pulled.less  |   25 +
 docs/static/fontawesome/less/core.less             |   12 +
 docs/static/fontawesome/less/fixed-width.less      |    6 +
 docs/static/fontawesome/less/font-awesome.less     |   18 +
 docs/static/fontawesome/less/icons.less            |  789 +++
 docs/static/fontawesome/less/larger.less           |   13 +
 docs/static/fontawesome/less/list.less             |   19 +
 docs/static/fontawesome/less/mixins.less           |   60 +
 docs/static/fontawesome/less/path.less             |   15 +
 docs/static/fontawesome/less/rotated-flipped.less  |   20 +
 docs/static/fontawesome/less/screen-reader.less    |    5 +
 docs/static/fontawesome/less/stacked.less          |   20 +
 docs/static/fontawesome/less/variables.less        |  800 +++
 docs/static/fontawesome/scss/_animated.scss        |   34 +
 docs/static/fontawesome/scss/_bordered-pulled.scss |   25 +
 docs/static/fontawesome/scss/_core.scss            |   12 +
 docs/static/fontawesome/scss/_fixed-width.scss     |    6 +
 docs/static/fontawesome/scss/_icons.scss           |  789 +++
 docs/static/fontawesome/scss/_larger.scss          |   13 +
 docs/static/fontawesome/scss/_list.scss            |   19 +
 docs/static/fontawesome/scss/_mixins.scss          |   60 +
 docs/static/fontawesome/scss/_path.scss            |   15 +
 docs/static/fontawesome/scss/_rotated-flipped.scss |   20 +
 docs/static/fontawesome/scss/_screen-reader.scss   |    5 +
 docs/static/fontawesome/scss/_stacked.scss         |   20 +
 docs/static/fontawesome/scss/_variables.scss       |  800 +++
 docs/static/fontawesome/scss/font-awesome.scss     |   18 +
 docs/static/fonts/glyphicons-halflings-regular.eot |  Bin 0 -> 20335 bytes
 docs/static/fonts/glyphicons-halflings-regular.svg |  229 +
 docs/static/fonts/glyphicons-halflings-regular.ttf |  Bin 0 -> 41280 bytes
 .../static/fonts/glyphicons-halflings-regular.woff |  Bin 0 -> 23320 bytes
 docs/static/img/GitHub-Mark.png                    |  Bin 0 -> 4268 bytes
 docs/static/img/Iceberg-logo-wordmark.png          |  Bin 0 -> 4874 bytes
 docs/static/img/Iceberg-logo.png                   |  Bin 0 -> 11068 bytes
 docs/static/img/Slack_Mark_Web.png                 |  Bin 0 -> 5990 bytes
 docs/static/img/asf.png                            |  Bin 0 -> 5863 bytes
 docs/static/img/contact-bg.jpg                     |  Bin 0 -> 215852 bytes
 docs/static/img/flink-logo.png                     |  Bin 0 -> 150244 bytes
 docs/static/img/hive-logo.png                      |  Bin 0 -> 76513 bytes
 docs/static/img/iceberg-logo-icon.png              |  Bin 0 -> 17608 bytes
 docs/static/img/iceberg-metadata.png               |  Bin 0 -> 140290 bytes
 docs/static/img/intro-bg.jpg                       |  Bin 0 -> 215852 bytes
 docs/static/img/partition-spec-evolution.png       |  Bin 0 -> 224020 bytes
 docs/static/img/prestodb-logo.png                  |  Bin 0 -> 4391 bytes
 docs/static/img/python.png                         |  Bin 0 -> 1084 bytes
 docs/static/img/screenshot.png                     |  Bin 0 -> 761931 bytes
 docs/static/img/services/hidden-partitioning.png   |  Bin 0 -> 353890 bytes
 docs/static/img/services/schema-evolution.png      |  Bin 0 -> 676552 bytes
 docs/static/img/services/time-travel.png           |  Bin 0 -> 721534 bytes
 docs/static/img/trino-logo.png                     |  Bin 0 -> 55240 bytes
 docs/themes/hugo-book/.github/workflows/main.yml   |   24 +
 docs/themes/hugo-book/.gitignore                   |    3 +
 docs/themes/hugo-book/LICENSE                      |   20 +
 docs/themes/hugo-book/README.md                    |  354 ++
 docs/themes/hugo-book/archetypes/docs.md           |   10 +
 docs/themes/hugo-book/archetypes/posts.md          |    6 +
 docs/themes/hugo-book/assets/_custom.scss          |    3 +
 docs/themes/hugo-book/assets/_defaults.scss        |   66 +
 docs/themes/hugo-book/assets/_fonts.scss           |   39 +
 docs/themes/hugo-book/assets/_main.scss            |  363 ++
 docs/themes/hugo-book/assets/_markdown.scss        |  192 +
 docs/themes/hugo-book/assets/_print.scss           |   17 +
 docs/themes/hugo-book/assets/_shortcodes.scss      |  104 +
 docs/themes/hugo-book/assets/_utils.scss           |   92 +
 docs/themes/hugo-book/assets/_variables.scss       |    3 +
 docs/themes/hugo-book/assets/book.scss             |   15 +
 docs/themes/hugo-book/assets/clipboard.js          |   21 +
 docs/themes/hugo-book/assets/manifest.json         |   15 +
 docs/themes/hugo-book/assets/menu-reset.js         |    7 +
 docs/themes/hugo-book/assets/mermaid.json          |    6 +
 docs/themes/hugo-book/assets/normalize.css         |  349 ++
 .../themes/hugo-book/assets/plugins/_numbered.scss |   36 +
 .../hugo-book/assets/plugins/_scrollbars.scss      |   26 +
 docs/themes/hugo-book/assets/search-data.json      |   15 +
 docs/themes/hugo-book/assets/search.js             |  104 +
 docs/themes/hugo-book/assets/sw-register.js        |    7 +
 docs/themes/hugo-book/assets/sw.js                 |   55 +
 docs/themes/hugo-book/assets/themes/_auto.scss     |    9 +
 docs/themes/hugo-book/assets/themes/_dark.scss     |    3 +
 docs/themes/hugo-book/assets/themes/_light.scss    |    3 +
 .../hugo-book/exampleSite/assets/_custom.scss      |    4 +
 .../hugo-book/exampleSite/assets/_variables.scss   |    1 +
 docs/themes/hugo-book/exampleSite/config.toml      |  118 +
 docs/themes/hugo-book/exampleSite/config.yaml      |  114 +
 .../hugo-book/exampleSite/content.bn/_index.md     |   79 +
 .../hugo-book/exampleSite/content.ru/_index.md     |   79 +
 .../hugo-book/exampleSite/content.zh/_index.md     |   79 +
 .../themes/hugo-book/exampleSite/content/_index.md |   41 +
 .../exampleSite/content/docs/example/_index.md     |   71 +
 .../docs/example/collapsed/3rd-level/4th-level.md  |   12 +
 .../docs/example/collapsed/3rd-level/_index.md     |   26 +
 .../content/docs/example/collapsed/_index.md       |    4 +
 .../exampleSite/content/docs/example/hidden.md     |   52 +
 .../docs/example/table-of-contents/_index.md       |   85 +
 .../docs/example/table-of-contents/with-toc.md     |   64 +
 .../docs/example/table-of-contents/without-toc.md  |   59 +
 .../exampleSite/content/docs/shortcodes/_index.md  |    3 +
 .../exampleSite/content/docs/shortcodes/buttons.md |   13 +
 .../exampleSite/content/docs/shortcodes/columns.md |   45 +
 .../exampleSite/content/docs/shortcodes/details.md |   22 +
 .../exampleSite/content/docs/shortcodes/expand.md  |   35 +
 .../exampleSite/content/docs/shortcodes/hints.md   |   32 +
 .../exampleSite/content/docs/shortcodes/katex.md   |   28 +
 .../exampleSite/content/docs/shortcodes/mermaid.md |   41 +
 .../content/docs/shortcodes/section/_index.md      |   15 +
 .../content/docs/shortcodes/section/first-page.md  |    6 +
 .../content/docs/shortcodes/section/second-page.md |    6 +
 .../exampleSite/content/docs/shortcodes/tabs.md    |   50 +
 .../hugo-book/exampleSite/content/menu/index.md    |   22 +
 .../hugo-book/exampleSite/content/posts/_index.md  |    7 +
 .../content/posts/creating-a-new-theme.md          | 1150 ++++
 .../exampleSite/content/posts/goisforlovers.md     |  344 ++
 .../exampleSite/content/posts/hugoisforlovers.md   |   89 +
 .../content/posts/migrate-from-jekyll.md           |  156 +
 docs/themes/hugo-book/go.mod                       |    3 +
 docs/themes/hugo-book/i18n/bn.yaml                 |   14 +
 docs/themes/hugo-book/i18n/cn.yaml                 |   21 +
 docs/themes/hugo-book/i18n/cs.yaml                 |   14 +
 docs/themes/hugo-book/i18n/de.yaml                 |   14 +
 docs/themes/hugo-book/i18n/en.yaml                 |   14 +
 docs/themes/hugo-book/i18n/es.yaml                 |   14 +
 docs/themes/hugo-book/i18n/fa.yaml                 |   20 +
 docs/themes/hugo-book/i18n/fr.yaml                 |   14 +
 docs/themes/hugo-book/i18n/it.yaml                 |   14 +
 docs/themes/hugo-book/i18n/ja.yaml                 |   20 +
 docs/themes/hugo-book/i18n/jp.yaml                 |   21 +
 docs/themes/hugo-book/i18n/ko.yaml                 |   20 +
 docs/themes/hugo-book/i18n/nb.yaml                 |   14 +
 docs/themes/hugo-book/i18n/pt.yaml                 |   14 +
 docs/themes/hugo-book/i18n/ru.yaml                 |   14 +
 docs/themes/hugo-book/i18n/sv.yaml                 |   14 +
 docs/themes/hugo-book/i18n/tr.yaml                 |   14 +
 docs/themes/hugo-book/i18n/uk.yaml                 |   14 +
 docs/themes/hugo-book/i18n/zh-TW.yaml              |   20 +
 docs/themes/hugo-book/i18n/zh.yaml                 |   20 +
 docs/themes/hugo-book/images/screenshot.png        |  Bin 0 -> 189080 bytes
 docs/themes/hugo-book/images/tn.png                |  Bin 0 -> 195683 bytes
 docs/themes/hugo-book/layouts/404.html             |   34 +
 .../layouts/_default/_markup/render-heading.html   |    4 +
 .../layouts/_default/_markup/render-image.html     |   19 +
 .../layouts/_default/_markup/render-link.html      |   28 +
 docs/themes/hugo-book/layouts/_default/baseof.html |   83 +
 docs/themes/hugo-book/layouts/_default/list.html   |    1 +
 docs/themes/hugo-book/layouts/_default/single.html |    1 +
 .../hugo-book/layouts/partials/docs/brand.html     |    8 +
 .../hugo-book/layouts/partials/docs/comments.html  |    2 +
 .../hugo-book/layouts/partials/docs/date.html      |    6 +
 .../hugo-book/layouts/partials/docs/footer.html    |   28 +
 .../hugo-book/layouts/partials/docs/header.html    |   13 +
 .../layouts/partials/docs/html-head-title.html     |    1 +
 .../hugo-book/layouts/partials/docs/html-head.html |   51 +
 .../layouts/partials/docs/inject/body.html         |    0
 .../partials/docs/inject/content-after.html        |    0
 .../partials/docs/inject/content-before.html       |    0
 .../layouts/partials/docs/inject/footer.html       |    0
 .../layouts/partials/docs/inject/head.html         |    0
 .../layouts/partials/docs/inject/menu-after.html   |    0
 .../layouts/partials/docs/inject/menu-before.html  |    0
 .../layouts/partials/docs/inject/toc-after.html    |    0
 .../layouts/partials/docs/inject/toc-before.html   |    0
 .../hugo-book/layouts/partials/docs/languages.html |   33 +
 .../layouts/partials/docs/menu-bundle.html         |    5 +
 .../layouts/partials/docs/menu-filetree.html       |   45 +
 .../hugo-book/layouts/partials/docs/menu-hugo.html |   28 +
 .../hugo-book/layouts/partials/docs/menu.html      |   25 +
 .../hugo-book/layouts/partials/docs/post-meta.html |   23 +
 .../hugo-book/layouts/partials/docs/search.html    |    7 +
 .../hugo-book/layouts/partials/docs/taxonomy.html  |   19 +
 .../hugo-book/layouts/partials/docs/title.html     |   15 +
 .../hugo-book/layouts/partials/docs/toc.html       |    3 +
 docs/themes/hugo-book/layouts/posts/list.html      |   22 +
 docs/themes/hugo-book/layouts/posts/single.html    |   13 +
 .../hugo-book/layouts/shortcodes/button.html       |   12 +
 .../hugo-book/layouts/shortcodes/columns.html      |    7 +
 .../hugo-book/layouts/shortcodes/details.html      |    7 +
 .../hugo-book/layouts/shortcodes/expand.html       |   13 +
 docs/themes/hugo-book/layouts/shortcodes/hint.html |    3 +
 .../themes/hugo-book/layouts/shortcodes/katex.html |   13 +
 .../hugo-book/layouts/shortcodes/mermaid.html      |   12 +
 .../hugo-book/layouts/shortcodes/section.html      |   10 +
 docs/themes/hugo-book/layouts/shortcodes/tab.html  |   12 +
 docs/themes/hugo-book/layouts/shortcodes/tabs.html |   15 +
 docs/themes/hugo-book/layouts/taxonomy/list.html   |   13 +
 .../hugo-book/layouts/taxonomy/taxonomy.html       |   22 +
 docs/themes/hugo-book/static/favicon.png           |  Bin 0 -> 109 bytes
 docs/themes/hugo-book/static/favicon.svg           |    1 +
 docs/themes/hugo-book/static/flexsearch.min.js     |   42 +
 .../fonts/roboto-mono-v13-latin-regular.woff       |  Bin 0 -> 15160 bytes
 .../fonts/roboto-mono-v13-latin-regular.woff2      |  Bin 0 -> 12312 bytes
 .../static/fonts/roboto-v27-latin-700.woff         |  Bin 0 -> 20396 bytes
 .../static/fonts/roboto-v27-latin-700.woff2        |  Bin 0 -> 15828 bytes
 .../static/fonts/roboto-v27-latin-regular.woff     |  Bin 0 -> 20332 bytes
 .../static/fonts/roboto-v27-latin-regular.woff2    |  Bin 0 -> 15688 bytes
 .../hugo-book/static/katex/auto-render.min.js      |    1 +
 .../static/katex/fonts/KaTeX_AMS-Regular.ttf       |  Bin 0 -> 70972 bytes
 .../static/katex/fonts/KaTeX_AMS-Regular.woff      |  Bin 0 -> 38868 bytes
 .../static/katex/fonts/KaTeX_AMS-Regular.woff2     |  Bin 0 -> 32944 bytes
 .../static/katex/fonts/KaTeX_Caligraphic-Bold.ttf  |  Bin 0 -> 19316 bytes
 .../static/katex/fonts/KaTeX_Caligraphic-Bold.woff |  Bin 0 -> 11696 bytes
 .../katex/fonts/KaTeX_Caligraphic-Bold.woff2       |  Bin 0 -> 10448 bytes
 .../katex/fonts/KaTeX_Caligraphic-Regular.ttf      |  Bin 0 -> 18684 bytes
 .../katex/fonts/KaTeX_Caligraphic-Regular.woff     |  Bin 0 -> 11460 bytes
 .../katex/fonts/KaTeX_Caligraphic-Regular.woff2    |  Bin 0 -> 10240 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Bold.ttf      |  Bin 0 -> 35660 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Bold.woff     |  Bin 0 -> 22632 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Bold.woff2    |  Bin 0 -> 20360 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Regular.ttf   |  Bin 0 -> 34352 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Regular.woff  |  Bin 0 -> 22088 bytes
 .../static/katex/fonts/KaTeX_Fraktur-Regular.woff2 |  Bin 0 -> 19784 bytes
 .../static/katex/fonts/KaTeX_Main-Bold.ttf         |  Bin 0 -> 60784 bytes
 .../static/katex/fonts/KaTeX_Main-Bold.woff        |  Bin 0 -> 35464 bytes
 .../static/katex/fonts/KaTeX_Main-Bold.woff2       |  Bin 0 -> 30244 bytes
 .../static/katex/fonts/KaTeX_Main-BoldItalic.ttf   |  Bin 0 -> 44496 bytes
 .../static/katex/fonts/KaTeX_Main-BoldItalic.woff  |  Bin 0 -> 25352 bytes
 .../static/katex/fonts/KaTeX_Main-BoldItalic.woff2 |  Bin 0 -> 21944 bytes
 .../static/katex/fonts/KaTeX_Main-Italic.ttf       |  Bin 0 -> 47640 bytes
 .../static/katex/fonts/KaTeX_Main-Italic.woff      |  Bin 0 -> 26228 bytes
 .../static/katex/fonts/KaTeX_Main-Italic.woff2     |  Bin 0 -> 22748 bytes
 .../static/katex/fonts/KaTeX_Main-Regular.ttf      |  Bin 0 -> 69520 bytes
 .../static/katex/fonts/KaTeX_Main-Regular.woff     |  Bin 0 -> 38112 bytes
 .../static/katex/fonts/KaTeX_Main-Regular.woff2    |  Bin 0 -> 32464 bytes
 .../static/katex/fonts/KaTeX_Math-BoldItalic.ttf   |  Bin 0 -> 39308 bytes
 .../static/katex/fonts/KaTeX_Math-BoldItalic.woff  |  Bin 0 -> 22324 bytes
 .../static/katex/fonts/KaTeX_Math-BoldItalic.woff2 |  Bin 0 -> 19720 bytes
 .../static/katex/fonts/KaTeX_Math-Italic.ttf       |  Bin 0 -> 40992 bytes
 .../static/katex/fonts/KaTeX_Math-Italic.woff      |  Bin 0 -> 22844 bytes
 .../static/katex/fonts/KaTeX_Math-Italic.woff2     |  Bin 0 -> 20096 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Bold.ttf    |  Bin 0 -> 33688 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Bold.woff   |  Bin 0 -> 18516 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Bold.woff2  |  Bin 0 -> 15732 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Italic.ttf  |  Bin 0 -> 30960 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Italic.woff |  Bin 0 -> 17572 bytes
 .../katex/fonts/KaTeX_SansSerif-Italic.woff2       |  Bin 0 -> 15024 bytes
 .../static/katex/fonts/KaTeX_SansSerif-Regular.ttf |  Bin 0 -> 29812 bytes
 .../katex/fonts/KaTeX_SansSerif-Regular.woff       |  Bin 0 -> 16228 bytes
 .../katex/fonts/KaTeX_SansSerif-Regular.woff2      |  Bin 0 -> 13708 bytes
 .../static/katex/fonts/KaTeX_Script-Regular.ttf    |  Bin 0 -> 24620 bytes
 .../static/katex/fonts/KaTeX_Script-Regular.woff   |  Bin 0 -> 13428 bytes
 .../static/katex/fonts/KaTeX_Script-Regular.woff2  |  Bin 0 -> 12064 bytes
 .../static/katex/fonts/KaTeX_Size1-Regular.ttf     |  Bin 0 -> 12916 bytes
 .../static/katex/fonts/KaTeX_Size1-Regular.woff    |  Bin 0 -> 6696 bytes
 .../static/katex/fonts/KaTeX_Size1-Regular.woff2   |  Bin 0 -> 5592 bytes
 .../static/katex/fonts/KaTeX_Size2-Regular.ttf     |  Bin 0 -> 12172 bytes
 .../static/katex/fonts/KaTeX_Size2-Regular.woff    |  Bin 0 -> 6436 bytes
 .../static/katex/fonts/KaTeX_Size2-Regular.woff2   |  Bin 0 -> 5392 bytes
 .../static/katex/fonts/KaTeX_Size3-Regular.ttf     |  Bin 0 -> 8120 bytes
 .../static/katex/fonts/KaTeX_Size3-Regular.woff    |  Bin 0 -> 4568 bytes
 .../static/katex/fonts/KaTeX_Size3-Regular.woff2   |  Bin 0 -> 3728 bytes
 .../static/katex/fonts/KaTeX_Size4-Regular.ttf     |  Bin 0 -> 11016 bytes
 .../static/katex/fonts/KaTeX_Size4-Regular.woff    |  Bin 0 -> 6184 bytes
 .../static/katex/fonts/KaTeX_Size4-Regular.woff2   |  Bin 0 -> 5028 bytes
 .../katex/fonts/KaTeX_Typewriter-Regular.ttf       |  Bin 0 -> 35924 bytes
 .../katex/fonts/KaTeX_Typewriter-Regular.woff      |  Bin 0 -> 20260 bytes
 .../katex/fonts/KaTeX_Typewriter-Regular.woff2     |  Bin 0 -> 17272 bytes
 docs/themes/hugo-book/static/katex/katex.min.css   |    1 +
 docs/themes/hugo-book/static/katex/katex.min.js    |    1 +
 docs/themes/hugo-book/static/mermaid.min.js        |   32 +
 docs/themes/hugo-book/static/svg/calendar.svg      |    1 +
 docs/themes/hugo-book/static/svg/edit.svg          |    1 +
 docs/themes/hugo-book/static/svg/menu.svg          |    1 +
 docs/themes/hugo-book/static/svg/toc.svg           |    1 +
 docs/themes/hugo-book/static/svg/translate.svg     |    1 +
 docs/themes/hugo-book/theme.toml                   |   16 +
 landing-page/.hugo_build.lock                      |    0
 landing-page/_default2/list.html                   |    0
 landing-page/_default2/single.html                 |    0
 landing-page/archetypes/default.md                 |    6 +
 landing-page/asciinema/README.md                   |   73 +
 landing-page/asciinema/generate_asciinema_cast.py  |   71 +
 landing-page/asciinema/schema_evolution.py         |   70 +
 landing-page/asciinema/time_travel.py              |   51 +
 landing-page/config.toml                           |   20 +
 landing-page/content/about/about.md                |   24 +
 landing-page/content/common/community/blogs.md     |  103 +
 landing-page/content/common/community/join.md      |   92 +
 landing-page/content/common/community/talks.md     |   33 +
 landing-page/content/common/format/spec.md         | 1090 ++++
 landing-page/content/common/format/terms.md        |   64 +
 landing-page/content/common/project/_index.md      |    6 +
 landing-page/content/common/project/benchmarks.md  |  134 +
 .../content/common/project/how-to-release.md       |  200 +
 landing-page/content/common/project/roadmap.md     |   61 +
 landing-page/content/common/project/security.md    |   34 +
 landing-page/content/common/project/trademarks.md  |   24 +
 .../content/common/releases/release-notes.md       |  261 +
 landing-page/content/posts/community/blogs.md      |  103 +
 landing-page/content/posts/community/join.md       |   92 +
 landing-page/content/posts/community/talks.md      |   33 +
 landing-page/content/posts/format/spec.md          | 1088 ++++
 landing-page/content/posts/format/terms.md         |   64 +
 landing-page/content/posts/project/benchmarks.md   |  134 +
 .../content/posts/project/how-to-release.md        |  200 +
 landing-page/content/posts/project/roadmap.md      |   61 +
 landing-page/content/posts/project/security.md     |   34 +
 landing-page/content/posts/project/trademarks.md   |   24 +
 .../content/posts/releases/release-notes.md        |  261 +
 .../content/services/hidden-partitioning.md        |   26 +
 landing-page/content/services/schema-evolution.md  |   25 +
 landing-page/content/services/time-travel.md       |   26 +
 landing-page/layouts/_default/single.html          |   13 +
 landing-page/layouts/index.html                    |    7 +
 landing-page/layouts/partials/about.html           |   15 +
 landing-page/layouts/partials/contact.html         |   16 +
 landing-page/layouts/partials/footer.html          |   16 +
 landing-page/layouts/partials/head.html            |   48 +
 landing-page/layouts/partials/header.html          |   93 +
 landing-page/layouts/partials/js.html              |   27 +
 landing-page/layouts/partials/pricing.html         |   38 +
 landing-page/layouts/partials/services.html        |   82 +
 landing-page/layouts/partials/social.html          |    8 +
 .../layouts/shortcodes/icebergVersion.html         |    1 +
 landing-page/layouts/shortcodes/nessieVersion.html |    1 +
 landing-page/static/asciinema/asciinema-player.css | 2567 ++++++++
 .../static/asciinema/asciinema-player.css.back     | 2567 ++++++++
 .../static/asciinema/asciinema-player.min.js       |    1 +
 .../static/asciinema/asciinema-player.min.js.back  |    1 +
 .../static/asciinema/schema_evolution.cast         |  296 +
 landing-page/static/asciinema/time_travel.cast     |  260 +
 landing-page/static/css/bootstrap.css              | 6199 ++++++++++++++++++++
 landing-page/static/css/katex.min.css              |    1 +
 landing-page/static/css/landing-page.css           |  219 +
 landing-page/static/css/markdown.css               |  959 +++
 landing-page/static/css/pricing.css                |  133 +
 landing-page/static/favicon-16x16.png              |  Bin 0 -> 1496 bytes
 landing-page/static/favicon-32x32.png              |  Bin 0 -> 2101 bytes
 landing-page/static/favicon-96x96.png              |  Bin 0 -> 5456 bytes
 landing-page/static/favicon.ico                    |  Bin 0 -> 1150 bytes
 landing-page/static/favicon.png                    |  Bin 0 -> 5456 bytes
 .../static/font-awesome-4.7.0/HELP-US-OUT.txt      |    7 +
 .../font-awesome-4.7.0/css/font-awesome.min.css    |    4 +
 .../font-awesome-4.7.0/fonts/FontAwesome.otf       |  Bin 0 -> 134808 bytes
 .../fonts/fontawesome-webfont.eot                  |  Bin 0 -> 165742 bytes
 .../fonts/fontawesome-webfont.svg                  | 2671 +++++++++
 .../fonts/fontawesome-webfont.ttf                  |  Bin 0 -> 165548 bytes
 .../fonts/fontawesome-webfont.woff                 |  Bin 0 -> 98024 bytes
 .../fonts/fontawesome-webfont.woff2                |  Bin 0 -> 77160 bytes
 .../static/font-awesome-4.7.0/less/animated.less   |   34 +
 .../font-awesome-4.7.0/less/bordered-pulled.less   |   25 +
 .../static/font-awesome-4.7.0/less/core.less       |   12 +
 .../font-awesome-4.7.0/less/fixed-width.less       |    6 +
 .../font-awesome-4.7.0/less/font-awesome.less      |   18 +
 .../static/font-awesome-4.7.0/less/icons.less      |  789 +++
 .../static/font-awesome-4.7.0/less/larger.less     |   13 +
 .../static/font-awesome-4.7.0/less/list.less       |   19 +
 .../static/font-awesome-4.7.0/less/mixins.less     |   60 +
 .../static/font-awesome-4.7.0/less/path.less       |   15 +
 .../font-awesome-4.7.0/less/rotated-flipped.less   |   20 +
 .../font-awesome-4.7.0/less/screen-reader.less     |    5 +
 .../static/font-awesome-4.7.0/less/stacked.less    |   20 +
 .../static/font-awesome-4.7.0/less/variables.less  |  800 +++
 .../static/font-awesome-4.7.0/scss/_animated.scss  |   34 +
 .../font-awesome-4.7.0/scss/_bordered-pulled.scss  |   25 +
 .../static/font-awesome-4.7.0/scss/_core.scss      |   12 +
 .../font-awesome-4.7.0/scss/_fixed-width.scss      |    6 +
 .../static/font-awesome-4.7.0/scss/_icons.scss     |  789 +++
 .../static/font-awesome-4.7.0/scss/_larger.scss    |   13 +
 .../static/font-awesome-4.7.0/scss/_list.scss      |   19 +
 .../static/font-awesome-4.7.0/scss/_mixins.scss    |   60 +
 .../static/font-awesome-4.7.0/scss/_path.scss      |   15 +
 .../font-awesome-4.7.0/scss/_rotated-flipped.scss  |   20 +
 .../font-awesome-4.7.0/scss/_screen-reader.scss    |    5 +
 .../static/font-awesome-4.7.0/scss/_stacked.scss   |   20 +
 .../static/font-awesome-4.7.0/scss/_variables.scss |  800 +++
 .../font-awesome-4.7.0/scss/font-awesome.scss      |   18 +
 .../static/fonts/glyphicons-halflings-regular.eot  |  Bin 0 -> 20335 bytes
 .../static/fonts/glyphicons-halflings-regular.svg  |  229 +
 .../static/fonts/glyphicons-halflings-regular.ttf  |  Bin 0 -> 41280 bytes
 .../static/fonts/glyphicons-halflings-regular.woff |  Bin 0 -> 23320 bytes
 landing-page/static/img/contact-bg.jpg             |  Bin 0 -> 215852 bytes
 landing-page/static/img/iceberg-logo-icon.png      |  Bin 0 -> 17608 bytes
 landing-page/static/img/intro-bg.jpg               |  Bin 0 -> 215852 bytes
 .../static/img/partition-spec-evolution.png        |  Bin 0 -> 224020 bytes
 .../static/img/services/hidden-partitioning.png    |  Bin 0 -> 353890 bytes
 .../static/img/services/schema-evolution.png       |  Bin 0 -> 676552 bytes
 landing-page/static/img/services/time-travel.png   |  Bin 0 -> 721534 bytes
 landing-page/static/js/bootstrap.js                | 2114 +++++++
 landing-page/static/js/bootstrap.min.js            |    6 +
 landing-page/static/js/jquery-1.11.0.js            |    4 +
 landing-page/static/js/jquery.easing.min.js        |   44 +
 landing-page/static/js/landing-page.js             |   31 +
 redirects/0.12.0/index.html                        |   19 +
 redirects/0.12.1/index.html                        |   19 +
 redirects/docs/index.html                          |   19 +
 redirects/getting-started/index.html               |   19 +
 redirects/latest/index.html                        |   19 +
 515 files changed, 63322 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
new file mode 100644
index 0000000..d4e9469
--- /dev/null
+++ b/.github/workflows/deploy.yml
@@ -0,0 +1,95 @@
+name: github pages
+
+on: [push, pull_request]
+
+jobs:
+  deploy-landing-page:
+    if: github.ref == 'refs/heads/main'
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v2
+
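+      # Derive the branch name from GITHUB_REF (e.g. refs/heads/main -> main) so later steps can reference it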
+      - name: Set output
+        id: vars
+        run: echo ::set-output name=branch_name::${GITHUB_REF#refs/*/}
+
+      - name: Set baseURL in config.toml
+        uses: ciiiii/toml-editor@1.0.0
+        with:
+          file: "landing-page/config.toml"
+          key: "baseURL"
+          value: "https://iceberg.apache.org/"
+
+      - name: Set params.docsBaseURL in config.toml
+        uses: ciiiii/toml-editor@1.0.0
+        with:
+          file: "landing-page/config.toml"
+          key: "params.docsBaseURL"
+          value: "https://iceberg.apache.org/docs/${{ steps.vars.outputs.branch_name }}"
+
+      - name: Setup Hugo
+        uses: peaceiris/actions-hugo@v2
+        with:
+          hugo-version: 'latest'
+          extended: true
+
+      - name: Build
+        run: cd landing-page && hugo --minify
+
+      - name: Deploy
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./landing-page/public
+          publish_branch: asf-site
+          destination_dir: ./
+          keep_files: true
+      - name: Deploy redirects
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./redirects
+          publish_branch: asf-site
+          destination_dir: ./
+          keep_files: true
+  deploy-docs:
+    if: github.ref != 'refs/heads/main'
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v2
+      
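+      # Derive the branch name from GITHUB_REF; it becomes the docs sub-directory and the displayed Iceberg version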
+      - name: Set output
+        id: vars
+        run: echo ::set-output name=branch_name::${GITHUB_REF#refs/*/}
+
+      - name: Set baseURL in config.toml
+        uses: ciiiii/toml-editor@1.0.0
+        with:
+          file: "docs/config.toml"
+          key: "baseURL"
+          value: "https://iceberg.apache.org/docs/${{ steps.vars.outputs.branch_name }}"
+
+      - name: Set params.version in config.toml
+        uses: ciiiii/toml-editor@1.0.0
+        with:
+          file: "docs/config.toml"
+          key: "params.versions.iceberg"
+          value: "${{ steps.vars.outputs.branch_name }}"
+      
+      - name: Setup Hugo
+        uses: peaceiris/actions-hugo@v2
+        with:
+          hugo-version: 'latest'
+          extended: true
+
+      - name: Build
+        run: cd docs && hugo --minify
+
+      - name: Deploy
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./docs/public
+          publish_branch: asf-site
+          destination_dir: ./docs/${{ steps.vars.outputs.branch_name }}
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..456f428
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+# Hugo generated directories
+/public
+resources
+
+# Script Generated Asciinema Recordings
+asciinema/output
+
+# Other
+__pycache__
+.DS_Store
+.vscode
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..12744dd
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "themes/hugo-book"]
+	path = themes/hugo-book
+	url = https://github.com/alex-shpak/hugo-book
diff --git a/README.md b/README.md
index 1fc1e82..61a9b85 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,93 @@
-## Iceberg Docs
+<!--
+  - Licensed to the Apache Software Foundation (ASF) under one
+  - or more contributor license agreements.  See the NOTICE file
+  - distributed with this work for additional information
+  - regarding copyright ownership.  The ASF licenses this file
+  - to you under the Apache License, Version 2.0 (the
+  - "License"); you may not use this file except in compliance
+  - with the License.  You may obtain a copy of the License at
+  -
+  -   http://www.apache.org/licenses/LICENSE-2.0
+  -
+  - Unless required by applicable law or agreed to in writing,
+  - software distributed under the License is distributed on an
+  - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  - KIND, either express or implied.  See the License for the
+  - specific language governing permissions and limitations
+  - under the License.
+  -->
 
-This repository contains the markdown documentation hosted at https://iceberg.apache.org.
+# Apache Iceberg Documentation Site
+
+This repository contains the documentation for [Apache Iceberg](https://github.com/apache/iceberg).
+It's built with [Hugo](https://gohugo.io/) and hosted at https://iceberg.apache.org.
+
+# Structure
+
+The Iceberg documentation site is constructed from two Hugo sites. The first is the landing page, which is
+an entire site on its own. The second is the documentation site, which contains the full Iceberg documentation.
+The landing page and documentation sites are completely self-contained in the `./landing-page` and `./docs` directories,
+respectively.
+
+# Landing Page Deployment
+
+The landing page site is automatically deployed to the root of the `asf-site` branch by the `deploy-landing-page`
+job in the [deployment workflow](./.github/workflows/deploy.yml). There is only a single version of the landing
+page site, and the `deploy-landing-page` job only runs on commits to the `main` branch.
+
+# Docs Deployment
+
+The docs site is automatically deployed to the `docs` directory in the `asf-site` branch, into a sub-directory
+named after the branch where the commit occurred. This is performed by the `deploy-docs` job in the
+[deployment workflow](./.github/workflows/deploy.yml). The job deploys the docs site on commits to any branch
+**except** `main`. A branch is maintained for each Iceberg version. If the job runs and the directory does not
+yet exist in the `asf-site` branch, it will be created.
+
+#### Latest Docs
+In [./redirects/docs/index.html](./redirects/docs/index.html) and [./redirects/latest/index.html](./redirects/latest/index.html),
+redirect meta tags forward the `/docs` and `/latest` paths to `/docs/0.12.1`.
+
+# `asf-site` Branch Structure
+
+The `asf-site` branch structure is the following:
+```
+.
+├── docs
+│   ├── 0.12.1
+│   │   └── <Full Docs Site @0.12.1 (also, `/latest` redirects here)>
+│   ├── 0.12.0
+│   │   └── <Full Docs Site @0.12.0>
+│   └── index.html  <-- Includes a redirect to 0.12.1
+└── <Full Landing Page Site>
+```
+
+A non-`main` branch commit deploys the docs site **only** and creates a new directory in the
+`docs` directory.  A `main` branch commit deploys the landing page site **only** and overwrites
+the landing page site at the root of the `asf-site` branch.
+
+# Redirects
+
+Redirects within either of the two sites can be created using the `aliases` keyword in the YAML front matter.
+You can read more about this Hugo URL Management feature [here](https://gohugo.io/content-management/urls/#yaml-front-matter).
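+
+For example, the Java quickstart page under `docs/content/docs/api/` declares an alias in its (trimmed) front matter, which makes the page reachable at the old `java/quickstart` URL in addition to `java-api-quickstart`:
+```
+---
+title: "Java Quickstart"
+url: java-api-quickstart
+aliases:
+    - "java/quickstart"
+---
+```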
+
+For root level redirects that are outside of both sites, the `./redirects` directory contains pages with redirect `meta` tags.
+These are all deployed at the root level of the `asf-site` branch by the `Deploy redirects` step in the [deployment workflow](./.github/workflows/deploy.yml).
+
+# Running Locally
+
+To start the landing page site locally, clone this repository and run the following.
+```
+git clone git@github.com:apache/iceberg-docs.git
+cd landing-page && hugo serve
+```
+
+To start the documentation site locally, clone this repository and run the following.
+```
+git clone git@github.com:apache/iceberg-docs.git
+git submodule update --init
+cd docs && hugo serve
+```
+
+# Scanning For Broken Links
+
+If you'd like to scan for broken links, one available tool is linkcheck, which can be found [here](https://github.com/filiph/linkcheck).
\ No newline at end of file
diff --git a/docs/.hugo_build.lock b/docs/.hugo_build.lock
new file mode 100644
index 0000000..e69de29
diff --git a/docs/_default2/list.html b/docs/_default2/list.html
new file mode 100644
index 0000000..e69de29
diff --git a/docs/_default2/single.html b/docs/_default2/single.html
new file mode 100644
index 0000000..e69de29
diff --git a/docs/archetypes/default.md b/docs/archetypes/default.md
new file mode 100644
index 0000000..00e77bd
--- /dev/null
+++ b/docs/archetypes/default.md
@@ -0,0 +1,6 @@
+---
+title: "{{ replace .Name "-" " " | title }}"
+date: {{ .Date }}
+draft: true
+---
+
diff --git a/docs/assets/_custom.scss b/docs/assets/_custom.scss
new file mode 100644
index 0000000..612762e
--- /dev/null
+++ b/docs/assets/_custom.scss
@@ -0,0 +1,78 @@
+.navigation-icon {
+  width: 1.28571429em;
+  max-height: 14px;
+}
+
+li.navigation-icon-pad {
+  padding-left: 1.28571429em;
+}
+
+  /*Navigation fa icons*/
+.fa-table {
+  color: #000000;
+}
+
+.fa-handshake-o {
+  color: #000000;
+}
+
+.fa-object-ungroup {
+  color: #000000;
+}
+
+.fa-star-o {
+  color: #E25A1D;
+}
+
+.top-external-icon {
+  height: 50px;
+  padding-top: 20px;
+  padding-right: 10px;
+  white-space: nowrap;
+  font-size: 18px;
+}
+
+#version-shield {
+  height: 1.5em;
+  width: unset;
+}
+
+#book-search-results {
+  position: absolute;
+}
+
+ul#book-search-results {
+  padding: 0;
+  margin: 0;
+  list-style-type: none;
+  float: left;
+  width: 100%;
+  background-color: white;
+  z-index: 10;
+}
+
+ul#book-search-results li small {
+  color: rgb(112, 128, 144);
+}
+
+.book-section-flat {
+  margin: $padding-16 * 1.2 0;
+  > a {
+    font-weight: normal;
+  }
+  > label {
+    font-weight: normal;
+  }
+}
+
+
+aside nav ul {
+  li {
+    margin: 0.5em 0;
+  }
+}
+
+
+.book-menu {
+  font-size: 0.8rem;
+}
\ No newline at end of file
diff --git a/docs/assets/search.js b/docs/assets/search.js
new file mode 100644
index 0000000..54ee547
--- /dev/null
+++ b/docs/assets/search.js
@@ -0,0 +1,116 @@
+'use strict';
+
+{{ $searchDataFile := printf "%s.search-data.json" .Language.Lang }}
+{{ $searchData := resources.Get "search-data.json" | resources.ExecuteAsTemplate $searchDataFile . | resources.Minify | resources.Fingerprint }}
+
+(function () {
+
+  const searchDataURL = '{{ $searchData.RelPermalink }}';
+
+  const input = document.querySelector('#book-search-input');
+  const results = document.querySelector('#book-search-results');
+
+  if (!input) {
+    return
+  }
+
+  input.addEventListener('focus', init);
+  input.addEventListener('keyup', search);
+
+  document.addEventListener('keypress', focusSearchFieldOnKeyPress);
+
+  /**
+   * @param {Event} event
+   */
+  function focusSearchFieldOnKeyPress(event) {
+    if (event.target.value !== undefined) {
+      return;
+    }
+
+    if (input === document.activeElement) {
+      return;
+    }
+
+    const characterPressed = String.fromCharCode(event.charCode);
+    if (!isHotkey(characterPressed)) {
+      return;
+    }
+
+    input.focus();
+    event.preventDefault();
+  }
+
+  /**
+   * @param {String} character
+   * @returns {Boolean} 
+   */
+  function isHotkey(character) {
+    const dataHotkeys = input.getAttribute('data-hotkeys') || '';
+    return dataHotkeys.indexOf(character) >= 0;
+  }
+
+  function init() {
+    input.removeEventListener('focus', init); // init once
+    input.required = true;
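+    // Fetch the search index once, cache the parsed pages on window.pages, then run an initial search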
+    fetch(searchDataURL)
+      .then(pages => pages.json())
+      .then(pages => {
+        window.pages = pages;
+      })
+      .then(() => input.required = false)
+      .then(search);
+  }
+
+  /**
+   * This generates a search preview given a page object and the query, which is assumed
+   * to be an exact-match query. extraLength defines how many characters before and after the
+   * search match to include. The search match is wrapped in a bold tag.
+   * @param {Object} page
+   * @param {String} query
+   * @param {Number} extraLength
+   * @returns {String} 
+   */
+  function getSearchPreview(page, query, extraLength) {
+      const resultLocation = page.content.toLowerCase().indexOf(query.toLowerCase());
+      return `<a href="${page.href}">...` +
+      page.content.substring(resultLocation-extraLength, resultLocation) +
+      "<b>" +
+      page.content.substring(resultLocation, resultLocation + query.length) +
+      "</b>" +
+      page.content.substring(resultLocation + query.length, resultLocation + extraLength) +
+      "..." +
+      "</a>"
+  }
+  function search() {
+    while (results.firstChild) {
+      results.removeChild(results.firstChild);
+    }
+
+    if (!input.value) {
+      return;
+    }
+
+    const searchHits = window.pages.filter(page => page.content.toLowerCase().includes(input.value.toLowerCase()) )
+    searchHits.forEach(function (page) {
+
+      var contentPreview = getSearchPreview(page, input.value, 30);
+
+      const li = element('<li><p></p><small></small></li>');
+      const p = li.querySelector('p'), small = li.querySelector('small');
+      p.textContent = page.title;
+      small.innerHTML = contentPreview;
+
+      results.appendChild(li);
+    });
+  }
+
+  /**
+   * @param {String} content
+   * @returns {Node}
+   */
+  function element(content) {
+    const div = document.createElement('div');
+    div.innerHTML = content;
+    return div.firstChild;
+  }
+})();
diff --git a/docs/config.toml b/docs/config.toml
new file mode 100644
index 0000000..bd4028d
--- /dev/null
+++ b/docs/config.toml
@@ -0,0 +1,11 @@
+baseURL = ""  # This is populated by the github deploy workflow and is equal to "<domainName>/docs/<version|latest>"
+languageCode = "en-us"
+title = "Apache Iceberg"
+theme= "hugo-book"
+
+[params]
+  BookLogo = "img/iceberg-logo-icon.png"
+  versions.iceberg = "" # This is populated by the github deploy workflow and is equal to the branch name
+  versions.nessie = "0.15.1"
+  latestVersions.iceberg = "0.12.1"  # This is used for the version badge on the "latest" site version
+  BookSection='docs' # This determines which directory will inform the left navigation menu
diff --git a/docs/content/_index.md b/docs/content/_index.md
new file mode 100644
index 0000000..767e5cc
--- /dev/null
+++ b/docs/content/_index.md
@@ -0,0 +1,54 @@
+---
+title: Introduction
+type: docs
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+![Iceberg](./img/Iceberg-logo.png)
+
+**Apache Iceberg is an open table format for huge analytic datasets.** Iceberg adds tables to compute engines including Spark, Trino, PrestoDB, Flink and Hive using a high-performance table format that works just like a SQL table.
+
+### User experience
+
+Iceberg avoids unpleasant surprises. Schema evolution works and won't inadvertently un-delete data. Users don't need to know about partitioning to get fast queries.
+
+* [Schema evolution](evolution#schema-evolution) supports add, drop, update, or rename, and has [no side-effects](evolution#correctness)
+* [Hidden partitioning](partitioning) prevents user mistakes that cause silently incorrect results or extremely slow queries
+* [Partition layout evolution](evolution#partition-evolution) can update the layout of a table as data volume or query patterns change
+* [Time travel](spark-queries#time-travel) enables reproducible queries that use exactly the same table snapshot, or lets users easily examine changes
+* Version rollback allows users to quickly correct problems by resetting tables to a good state
+
+### Reliability and performance
+
+Iceberg was built for huge tables. Iceberg is used in production where a single table can contain tens of petabytes of data, and even these huge tables can be read without a distributed SQL engine.
+
+* [Scan planning is fast](performance#scan-planning) -- a distributed SQL engine isn't needed to read a table or find files
+* [Advanced filtering](performance#data-filtering) -- data files are pruned with partition and column-level stats, using table metadata
+
+Iceberg was designed to solve correctness problems in eventually-consistent cloud object stores.
+
+* [Works with any cloud store](reliability) and reduces NameNode congestion when in HDFS, by avoiding listing and renames
+* [Serializable isolation](reliability) -- table changes are atomic and readers never see partial or uncommitted changes
+* [Multiple concurrent writers](reliability#concurrent-write-operations) use optimistic concurrency and will retry to ensure that compatible updates succeed, even when writes conflict
+
+### Open standard
+
+Iceberg has been designed and developed to be an open community standard with a [specification](spec) to ensure compatibility across languages and implementations.
+
+[Apache Iceberg is open source](community), and is developed at the [Apache Software Foundation](https://www.apache.org/).
+
diff --git a/docs/content/docs/api/_index.md b/docs/content/docs/api/_index.md
new file mode 100644
index 0000000..23ff2e8
--- /dev/null
+++ b/docs/content/docs/api/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-connectdevelop
+weight: 700
+bookCollapseSection: true
+---
\ No newline at end of file
diff --git a/docs/content/docs/api/java-api-quickstart.md b/docs/content/docs/api/java-api-quickstart.md
new file mode 100644
index 0000000..e4ebe09
--- /dev/null
+++ b/docs/content/docs/api/java-api-quickstart.md
@@ -0,0 +1,197 @@
+---
+weight: 100
+title: "Java Quickstart"
+url: java-api-quickstart
+aliases:
+    - "java/quickstart"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Java API Quickstart
+
+## Create a table
+
+Tables are created using either a [`Catalog`](../javadoc/master/index.html?org/apache/iceberg/catalog/Catalog.html) or an implementation of the [`Tables`](../javadoc/master/index.html?org/apache/iceberg/Tables.html) interface.
+
+### Using a Hive catalog
+
+The Hive catalog connects to a Hive metastore to keep track of Iceberg tables.
+You can initialize a Hive catalog with a name and some properties.
+(see: [Catalog properties](../configuration/#catalog-properties))
+
+**Note:** Currently, `setConf` is always required for Hive catalogs, but this will change in the future.
+
+```java
+import org.apache.iceberg.hive.HiveCatalog;
+
+Catalog catalog = new HiveCatalog();
+catalog.setConf(spark.sparkContext().hadoopConfiguration());  // Configure using Spark's Hadoop configuration
+
+Map<String, String> properties = new HashMap<String, String>();
+properties.put("warehouse", "...");
+properties.put("uri", "...");
+
+catalog.initialize("hive", properties);
+```
+
+The `Catalog` interface defines methods for working with tables, like `createTable`, `loadTable`, `renameTable`, and `dropTable`.
+
+To create a table, pass an `Identifier` and a `Schema` along with other initial metadata:
+
+```java
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+TableIdentifier name = TableIdentifier.of("logging", "logs");
+Table table = catalog.createTable(name, schema, spec);
+
+// or to load an existing table, use the following line
+// Table table = catalog.loadTable(name);
+```
+
+The logs [schema](#create-a-schema) and [partition spec](#create-a-partition-spec) are created below.
+
+
+### Using a Hadoop catalog
+
+A Hadoop catalog doesn't need to connect to a Hive MetaStore, but can only be used with HDFS or similar file systems that support atomic rename. Concurrent writes with a Hadoop catalog are not safe with a local FS or S3. To create a Hadoop catalog:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.hadoop.HadoopCatalog;
+
+Configuration conf = new Configuration();
+String warehousePath = "hdfs://host:8020/warehouse_path";
+HadoopCatalog catalog = new HadoopCatalog(conf, warehousePath);
+```
+
+Like the Hive catalog, `HadoopCatalog` implements `Catalog`, so it also has methods for working with tables, like `createTable`, `loadTable`, and `dropTable`.
+
+This example creates a table with a Hadoop catalog:
+
+```java
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+TableIdentifier name = TableIdentifier.of("logging", "logs");
+Table table = catalog.createTable(name, schema, spec);
+
+// or to load an existing table, use the following line
+// Table table = catalog.loadTable(name);
+```
+
+The logs [schema](#create-a-schema) and [partition spec](#create-a-partition-spec) are created below.
+
+
+### Using Hadoop tables
+
+Iceberg also supports tables that are stored in a directory in HDFS. Concurrent writes with Hadoop tables are not safe when the table is stored in the local FS or S3. Directory tables don't support all catalog operations, like rename, so they use the `Tables` interface instead of `Catalog`.
+
+To create a table in HDFS, use `HadoopTables`:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.Table;
+
+Configuration conf = new Configuration();
+HadoopTables tables = new HadoopTables(conf);
+Table table = tables.create(schema, spec, table_location);
+
+// or to load an existing table, use the following line
+// Table table = tables.load(table_location);
+```
+
+{{< hint danger >}}
+Hadoop tables shouldn't be used with file systems that do not support atomic rename. Iceberg relies on rename to synchronize concurrent commits for directory tables.
+{{< /hint >}}
+
+### Tables in Spark
+
+Spark uses both `HiveCatalog` and `HadoopTables` to load tables. Hive is used when the identifier passed to `load` or `save` is not a path; otherwise, Spark assumes it is a path-based table.
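+
+For example, a hedged sketch of loading tables from Spark in Java (assuming an existing `SparkSession` named `spark`; the table name and path below are placeholders):
+
+```java
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+
+// Catalog table: the identifier is not a path, so Spark uses the Hive catalog
+Dataset<Row> byName = spark.read().format("iceberg").load("logging.logs");
+
+// Path-based table: the identifier is a location, so Spark assumes a HadoopTables table
+Dataset<Row> byPath = spark.read().format("iceberg").load("hdfs://host:8020/warehouse_path/logging/logs");
+```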
+
+To read and write to tables from Spark see:
+
+* [SQL queries in Spark](../spark-queries#querying-with-sql)
+* [`INSERT INTO` in Spark](../spark-writes#insert-into)
+* [`MERGE INTO` in Spark](../spark-writes#merge-into)
+
+
+## Schemas
+
+### Create a schema
+
+This example creates a schema for a `logs` table:
+
+```java
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.types.Types;
+
+Schema schema = new Schema(
+      Types.NestedField.required(1, "level", Types.StringType.get()),
+      Types.NestedField.required(2, "event_time", Types.TimestampType.withZone()),
+      Types.NestedField.required(3, "message", Types.StringType.get()),
+      Types.NestedField.optional(4, "call_stack", Types.ListType.ofRequired(5, Types.StringType.get()))
+    );
+```
+
+When using the Iceberg API directly, type IDs are required. Conversions from other schema formats, like Spark, Avro, and Parquet, will automatically assign new IDs.
+
+When a table is created, all IDs in the schema are re-assigned to ensure uniqueness.
+
+### Convert a schema from Avro
+
+To create an Iceberg schema from an existing Avro schema, use converters in `AvroSchemaUtil`:
+
+```java
+import org.apache.avro.Schema;
+import org.apache.avro.Schema.Parser;
+import org.apache.iceberg.avro.AvroSchemaUtil;
+
+Schema avroSchema = new Parser().parse("{\"type\": \"record\" , ... }");
+Schema icebergSchema = AvroSchemaUtil.toIceberg(avroSchema);
+```
+
+### Convert a schema from Spark
+
+To create an Iceberg schema from an existing table, use converters in `SparkSchemaUtil`:
+
+```java
+import org.apache.iceberg.spark.SparkSchemaUtil;
+
+Schema schema = SparkSchemaUtil.schemaForTable(sparkSession, table_name);
+```
+
+## Partitioning
+
+### Create a partition spec
+
+Partition specs describe how Iceberg should group records into data files. Partition specs are created for a table's schema using a builder.
+
+This example creates a partition spec for the `logs` table that partitions records by the hour of the log event's timestamp and by log level:
+
+```java
+import org.apache.iceberg.PartitionSpec;
+
+PartitionSpec spec = PartitionSpec.builderFor(schema)
+      .hour("event_time")
+      .identity("level")
+      .build();
+```
+
+For more information on the different partition transforms that Iceberg offers, visit [this page](../spec#partitioning).
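+
+As an illustrative sketch (not part of the original example), the same builder can combine other transforms; here the `logs` schema is partitioned by day instead of hour:
+
+```java
+import org.apache.iceberg.PartitionSpec;
+
+// Assumes the `schema` created above; `day` is one of several available transforms
+PartitionSpec daySpec = PartitionSpec.builderFor(schema)
+      .day("event_time")
+      .identity("level")
+      .build();
+```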
diff --git a/docs/content/docs/api/java-api.md b/docs/content/docs/api/java-api.md
new file mode 100644
index 0000000..3c765c8
--- /dev/null
+++ b/docs/content/docs/api/java-api.md
@@ -0,0 +1,259 @@
+---
+weight: 200
+title: "Java API"
+url: api
+aliases:
+    - "java/api"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Iceberg Java API
+
+## Tables
+
+The main purpose of the Iceberg API is to manage table metadata, like schemas, partition specs, snapshots, and the data files that store table data.
+
+Table metadata and operations are accessed through the `Table` interface, which provides access to table information.
+
+### Table metadata
+
+The [`Table` interface](../javadoc/master/index.html?org/apache/iceberg/Table.html) provides access to the table metadata:
+
+* `schema` returns the current table [schema](../schemas)
+* `spec` returns the current table partition spec
+* `properties` returns a map of key-value [properties](../configuration)
+* `currentSnapshot` returns the current table snapshot
+* `snapshots` returns all valid snapshots for the table
+* `snapshot(id)` returns a specific snapshot by ID
+* `location` returns the table's base location
+
+Tables also provide `refresh` to update the table to the latest version, and expose helpers:
+
+* `io` returns the `FileIO` used to read and write table files
+* `locationProvider` returns a `LocationProvider` used to create paths for data and metadata files
+
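+As a brief sketch (assuming `table` was loaded from a catalog as in the quickstart), these accessors can be used like this:
+
+```java
+Schema schema = table.schema();                  // current schema
+PartitionSpec spec = table.spec();               // current partition spec
+Map<String, String> props = table.properties();  // table properties
+Snapshot current = table.currentSnapshot();      // may be null if the table has no snapshots
+String location = table.location();              // table base location
+```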
+
+### Scanning
+
+#### File level
+
+Iceberg table scans start by creating a `TableScan` object with `newScan`.
+
+```java
+TableScan scan = table.newScan();
+```
+
+To configure a scan, call `filter` and `select` on the `TableScan` to get a new `TableScan` with those changes.
+
+```java
+TableScan filteredScan = scan.filter(Expressions.equal("id", 5));
+```
+
+Calls to configuration methods create a new `TableScan` so that each `TableScan` is immutable and won't change unexpectedly if shared across threads.
+
+When a scan is configured, `planFiles`, `planTasks`, and `schema` are used to return files, tasks, and the read projection.
+
+```java
+TableScan scan = table.newScan()
+    .filter(Expressions.equal("id", 5))
+    .select("id", "data");
+
+Schema projection = scan.schema();
+Iterable<CombinedScanTask> tasks = scan.planTasks();
+```
+
+Use `asOfTime` or `useSnapshot` to configure the table snapshot for time travel queries.
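+
+For example, a minimal sketch (the snapshot ID and timestamp below are placeholders):
+
+```java
+// Read the table as of a specific snapshot ID
+TableScan snapshotScan = table.newScan().useSnapshot(1234567890L);
+
+// Read the table as of a timestamp, in milliseconds since the epoch
+TableScan timeScan = table.newScan().asOfTime(System.currentTimeMillis() - 3600 * 1000L);
+```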
+
+#### Row level
+
+Iceberg table scans start by creating a `ScanBuilder` object with `IcebergGenerics.read`.
+
+```java
+ScanBuilder scanBuilder = IcebergGenerics.read(table);
+```
+
+To configure a scan, call `where` and `select` on the `ScanBuilder` to get a new `ScanBuilder` with those changes.
+
+```java
+scanBuilder.where(Expressions.equal("id", 5));
+```
+
+When a scan is configured, call the `build` method to execute the scan. `build` returns a `CloseableIterable<Record>`.
+
+```java
+CloseableIterable<Record> result = IcebergGenerics.read(table)
+        .where(Expressions.lessThan("id", 5))
+        .build();
+```
+where `Record` is the Iceberg generic record from the `iceberg-data` module, `org.apache.iceberg.data.Record`.
+
+### Update operations
+
+`Table` also exposes operations that update the table. These operations use a builder pattern, [`PendingUpdate`](../javadoc/master/index.html?org/apache/iceberg/PendingUpdate.html), that commits when `PendingUpdate#commit` is called.
+
+For example, updating the table schema is done by calling `updateSchema`, adding updates to the builder, and finally calling `commit` to commit the pending changes to the table:
+
+```java
+table.updateSchema()
+    .addColumn("count", Types.LongType.get())
+    .commit();
+```
+
+Available operations to update a table are:
+
+* `updateSchema` -- update the table schema
+* `updateProperties` -- update table properties
+* `updateLocation` -- update the table's base location
+* `newAppend` -- used to append data files
+* `newFastAppend` -- used to append data files, will not compact metadata
+* `newOverwrite` -- used to append data files and remove files that are overwritten
+* `newDelete` -- used to delete data files
+* `newRewrite` -- used to rewrite data files; will replace existing files with new versions
+* `newTransaction` -- create a new table-level transaction
+* `rewriteManifests` -- rewrite manifest data by clustering files, for faster scan planning
+* `rollback` -- rollback the table state to a specific snapshot
+
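+For example, a hedged sketch of appending a previously written data file with `newAppend` (assumes `dataFile` is a `DataFile` describing a file already written to the table's location):
+
+```java
+table.newAppend()
+    .appendFile(dataFile)
+    .commit();
+```
+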
+### Transactions
+
+Transactions are used to commit multiple table changes in a single atomic operation. A transaction is used to create individual operations using factory methods, like `newAppend`, just like working with a `Table`. Operations created by a transaction are committed as a group when `commitTransaction` is called.
+
+For example, deleting and appending a file in the same transaction:
+```java
+Transaction t = table.newTransaction();
+
+// commit operations to the transaction
+t.newDelete().deleteFromRowFilter(filter).commit();
+t.newAppend().appendFile(data).commit();
+
+// commit all the changes to the table
+t.commitTransaction();
+```
+
+## Types
+
+Iceberg data types are located in the [`org.apache.iceberg.types` package](../javadoc/master/index.html?org/apache/iceberg/types/package-summary.html).
+
+### Primitives
+
+Primitive type instances are available from static methods in each type class. Types without parameters use `get`, and types like `decimal` use factory methods:
+
+```java
+Types.IntegerType.get()    // int
+Types.DoubleType.get()     // double
+Types.DecimalType.of(9, 2) // decimal(9, 2)
+```
+
+### Nested types
+
+Structs, maps, and lists are created using factory methods in type classes.
+
+Like struct fields, map keys or values and list elements are tracked as nested fields. Nested fields track [field IDs](../evolution#correctness) and nullability.
+
+Struct fields are created using `NestedField.optional` or `NestedField.required`. Map value and list element nullability is set in the map and list factory methods.
+
+```java
+// struct<1 id: int, 2 data: optional string>
+StructType struct = StructType.of(
+    Types.NestedField.required(1, "id", Types.IntegerType.get()),
+    Types.NestedField.optional(2, "data", Types.StringType.get())
+  );
+```
+```java
+// map<1 key: int, 2 value: optional string>
+MapType map = MapType.ofOptional(
+    1, 2,
+    Types.IntegerType.get(),
+    Types.StringType.get()
+  );
+```
+```java
+// array<1 element: int>
+ListType list = ListType.ofRequired(1, Types.IntegerType.get());
+```
+
+
+## Expressions
+
+Iceberg's expressions are used to configure table scans. To create expressions, use the factory methods in [`Expressions`](../javadoc/master/index.html?org/apache/iceberg/expressions/Expressions.html).
+
+Supported predicate expressions are:
+
+* `isNull`
+* `notNull`
+* `equal`
+* `notEqual`
+* `lessThan`
+* `lessThanOrEqual`
+* `greaterThan`
+* `greaterThanOrEqual`
+* `in`
+* `notIn`
+* `startsWith`
+
+Supported expression operations are:
+
+* `and`
+* `or`
+* `not`
+
+Constant expressions are:
+
+* `alwaysTrue`
+* `alwaysFalse`
+
+### Expression binding
+
+When created, expressions are unbound. Before an expression is used, it will be bound to a data type to find the field ID the expression name represents, and to convert predicate literals.
+
+For example, before using the expression `lessThan("x", 10)`, Iceberg needs to determine which column `"x"` refers to and convert `10` to that column's data type.
+
+The same expression could be bound to the type `struct<1 x: long, 2 y: long>`, where `x` resolves to field 1 and `10` is converted to a long, or to `struct<11 x: int, 12 y: int>`, where `x` resolves to field 11 and `10` is converted to an int.
+
+### Expression example
+
+```java
+table.newScan()
+    .filter(Expressions.greaterThanOrEqual("x", 5))
+    .filter(Expressions.lessThan("x", 10));
+```
+
+
+## Modules
+
+Iceberg table support is organized in library modules:
+
+* `iceberg-common` contains utility classes used in other modules
+* `iceberg-api` contains the public Iceberg API, including expressions, types, tables, and operations
+* `iceberg-arrow` is an implementation of the Iceberg type system for reading and writing data stored in Iceberg tables using Apache Arrow as the in-memory data format
+* `iceberg-aws` contains implementations of the Iceberg API to be used with tables stored on AWS S3 and/or for tables defined using the AWS Glue data catalog
+* `iceberg-core` contains implementations of the Iceberg API and support for Avro data files. **This is what processing engines should depend on.**
+* `iceberg-parquet` is an optional module for working with tables backed by Parquet files
+* `iceberg-orc` is an optional module for working with tables backed by ORC files (*experimental*)
+* `iceberg-hive-metastore` is an implementation of Iceberg tables backed by the Hive metastore Thrift client
+
+The Iceberg project also has modules for adding Iceberg support to processing engines and associated tooling:
+
+* `iceberg-spark2` is an implementation of Spark's Datasource V2 API in 2.4 for Iceberg (use iceberg-spark-runtime for a shaded version)
+* `iceberg-spark3` is an implementation of Spark's Datasource V2 API in 3.0 for Iceberg (use iceberg-spark3-runtime for a shaded version)
+* `iceberg-flink` is an implementation of Flink's Table and DataStream API for Iceberg (use iceberg-flink-runtime for a shaded version)
+* `iceberg-hive3` is an implementation of Hive 3 specific SerDe's for Timestamp, TimestampWithZone, and Date object inspectors (use iceberg-hive-runtime for a shaded version).
+* `iceberg-mr` is an implementation of MapReduce and Hive InputFormats and SerDes for Iceberg (use iceberg-hive-runtime for a shaded version for use with Hive)
+* `iceberg-nessie` is a module used to integrate Iceberg table metadata history and operations with [Project Nessie](https://projectnessie.org/)
+* `iceberg-data` is a client library used to read Iceberg tables from JVM applications
+* `iceberg-pig` is an implementation of Pig's LoadFunc API for Iceberg
+* `iceberg-runtime` generates a shaded runtime jar for Spark to integrate with iceberg tables
+
diff --git a/docs/content/docs/api/java-custom-catalog.md b/docs/content/docs/api/java-custom-catalog.md
new file mode 100644
index 0000000..5787412
--- /dev/null
+++ b/docs/content/docs/api/java-custom-catalog.md
@@ -0,0 +1,272 @@
+---
+weight: 300
+title: "Java Custom Catalog"
+url: custom-catalog
+aliases:
+    - "java/custom-catalog"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Custom Catalog Implementation
+
+It's possible to read an Iceberg table either from an HDFS path or from a Hive table. It's also possible to use a custom metastore in place of Hive. The steps to do so are as follows.
+
+- [Custom TableOperations](#custom-table-operations-implementation)
+- [Custom Catalog](#custom-catalog-implementation)
+- [Custom FileIO](#custom-file-io-implementation)
+- [Custom LocationProvider](#custom-location-provider-implementation)
+- [Custom IcebergSource](#custom-icebergsource)
+
+### Custom table operations implementation
+Extend `BaseMetastoreTableOperations` to provide an implementation of how table metadata is read and written.
+
+Example:
+```java
+class CustomTableOperations extends BaseMetastoreTableOperations {
+  private String dbName;
+  private String tableName;
+  private Configuration conf;
+  private FileIO fileIO;
+
+  protected CustomTableOperations(Configuration conf, String dbName, String tableName) {
+    this.conf = conf;
+    this.dbName = dbName;
+    this.tableName = tableName;
+  }
+
+  // The doRefresh method should provide implementation on how to get the metadata location
+  @Override
+  public void doRefresh() {
+
+    // Example custom service which returns the metadata location given a dbName and tableName
+    String metadataLocation = CustomService.getMetadataForTable(conf, dbName, tableName);
+
+    // When updating from a metadata file location, call the helper method
+    refreshFromMetadataLocation(metadataLocation);
+
+  }
+
+  // The doCommit method should provide implementation on how to update with metadata location atomically
+  @Override
+  public void doCommit(TableMetadata base, TableMetadata metadata) {
+    String oldMetadataLocation = base.location();
+
+    // Write new metadata using helper method
+    String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
+
+    // Example custom service which updates the metadata location for the given db and table atomically
+    CustomService.updateMetadataLocation(dbName, tableName, oldMetadataLocation, newMetadataLocation);
+
+  }
+
+  // The io method provides a FileIO which is used to read and write the table metadata files
+  @Override
+  public FileIO io() {
+    if (fileIO == null) {
+      fileIO = new HadoopFileIO(conf);
+    }
+    return fileIO;
+  }
+}
+```
+
+A `TableOperations` instance is usually obtained by calling `Catalog.newTableOps(TableIdentifier)`.
+See the next section about implementing and loading a custom catalog.
+
+### Custom catalog implementation
+Extend `BaseMetastoreCatalog` to provide default warehouse locations and to instantiate `CustomTableOperations`.
+
+Example:
+```java
+public class CustomCatalog extends BaseMetastoreCatalog {
+
+  private Configuration configuration;
+
+  // must have a no-arg constructor to be dynamically loaded
+  // initialize(String name, Map<String, String> properties) will be called to complete initialization
+  public CustomCatalog() {
+  }
+
+  public CustomCatalog(Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  @Override
+  protected TableOperations newTableOps(TableIdentifier tableIdentifier) {
+    String dbName = tableIdentifier.namespace().level(0);
+    String tableName = tableIdentifier.name();
+    // instantiate the CustomTableOperations
+    return new CustomTableOperations(configuration, dbName, tableName);
+  }
+
+  @Override
+  protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
+
+    // Can choose to use any other configuration name
+    String tableLocation = configuration.get("custom.iceberg.warehouse.location");
+
+    // Can be an s3 or hdfs path
+    if (tableLocation == null) {
+      throw new RuntimeException("custom.iceberg.warehouse.location configuration not set!");
+    }
+
+    return String.format(
+            "%s/%s.db/%s", tableLocation,
+            tableIdentifier.namespace().levels()[0],
+            tableIdentifier.name());
+  }
+
+  @Override
+  public boolean dropTable(TableIdentifier identifier, boolean purge) {
+    // Example service to delete table
+    CustomService.deleteTable(identifier.namespace().level(0), identifier.name());
+    return true;
+  }
+
+  @Override
+  public void renameTable(TableIdentifier from, TableIdentifier to) {
+    Preconditions.checkArgument(from.namespace().level(0).equals(to.namespace().level(0)),
+            "Cannot move table between databases");
+    // Example service to rename table
+    CustomService.renameTable(from.namespace().level(0), from.name(), to.name());
+  }
+
+  // implement this method to read catalog name and properties during initialization
+  public void initialize(String name, Map<String, String> properties) {
+  }
+}
+```
+
+Catalog implementations can be dynamically loaded in most compute engines.
+For Spark and Flink, you can specify the `catalog-impl` catalog property to load it.
+Read the [Configuration](../configuration/#catalog-properties) section for more details.
+For MapReduce, implement `org.apache.iceberg.mr.CatalogLoader` and set Hadoop property `iceberg.mr.catalog.loader.class` to load it.
+If your catalog must read Hadoop configuration to access certain environment properties, make your catalog implement `org.apache.hadoop.conf.Configurable`.
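+
+For example, a hedged sketch of loading this custom catalog in Spark 3 through catalog properties (the catalog name `custom_prod` and the class name are placeholders):
+
+```java
+import org.apache.spark.sql.SparkSession;
+
+SparkSession spark = SparkSession.builder()
+    .config("spark.sql.catalog.custom_prod", "org.apache.iceberg.spark.SparkCatalog")
+    .config("spark.sql.catalog.custom_prod.catalog-impl", "com.my.custom.CustomCatalog")
+    .config("spark.sql.catalog.custom_prod.my-additional-catalog-config", "my-value")
+    .getOrCreate();
+```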
+
+### Custom file IO implementation
+
+Implement the `FileIO` interface to read and write data files.
+
+Example:
+```java
+public class CustomFileIO implements FileIO {
+
+  // must have a no-arg constructor to be dynamically loaded
+  // initialize(Map<String, String> properties) will be called to complete initialization
+  public CustomFileIO() {
+  }
+
+  @Override
+  public InputFile newInputFile(String s) {
+    // you also need to implement the InputFile interface for a custom input file
+    return new CustomInputFile(s);
+  }
+
+  @Override
+  public OutputFile newOutputFile(String s) {
+    // you also need to implement the OutputFile interface for a custom output file
+    return new CustomOutputFile(s);
+  }
+
+  @Override
+  public void deleteFile(String path) {
+    Path toDelete = new Path(path);
+    FileSystem fs = Util.getFs(toDelete);
+    try {
+        fs.delete(toDelete, false /* not recursive */);
+    } catch (IOException e) {
+        throw new RuntimeIOException(e, "Failed to delete file: %s", path);
+    }
+  }
+
+  // implement this method to read catalog properties during initialization
+  public void initialize(Map<String, String> properties) {
+  }
+}
+```
+
+If you are already implementing your own catalog, you can implement `TableOperations.io()` to use your custom `FileIO`.
+In addition, custom `FileIO` implementations can also be dynamically loaded in `HadoopCatalog` and `HiveCatalog` by specifying the `io-impl` catalog property.
+Read the [Configuration](../configuration/#catalog-properties) section for more details.
+If your `FileIO` must read Hadoop configuration to access certain environment properties, make your `FileIO` implement `org.apache.hadoop.conf.Configurable`.
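+
+For example, a hedged sketch of supplying a custom `FileIO` to a Hive catalog via the `io-impl` property (the class name and connection values are placeholders):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.hive.HiveCatalog;
+
+Map<String, String> properties = new HashMap<>();
+properties.put("uri", "thrift://localhost:9083");
+properties.put("warehouse", "hdfs://nn:8020/path/to/warehouse");
+properties.put("io-impl", "com.my.custom.CustomFileIO");
+
+HiveCatalog catalog = new HiveCatalog();
+catalog.setConf(new Configuration());
+catalog.initialize("hive_prod", properties);
+```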
+
+### Custom location provider implementation
+
+Implement the `LocationProvider` interface to determine the file paths where data is written.
+
+Example:
+```java
+public class CustomLocationProvider implements LocationProvider {
+
+  private String tableLocation;
+
+  // must have a 2-arg constructor like this, or a no-arg constructor
+  public CustomLocationProvider(String tableLocation, Map<String, String> properties) {
+    this.tableLocation = tableLocation;
+  }
+
+  @Override
+  public String newDataLocation(String filename) {
+    // can use any custom method to generate a file path given a file name
+    return String.format("%s/%s/%s", tableLocation, UUID.randomUUID().toString(), filename);
+  }
+
+  @Override
+  public String newDataLocation(PartitionSpec spec, StructLike partitionData, String filename) {
+    // can use any custom method to generate a file path given a partition info and file name
+    return newDataLocation(filename);
+  }
+}
+```
+
+If you are already implementing your own catalog, you can override `TableOperations.locationProvider()` to use your custom default `LocationProvider`.
+To use a different custom location provider for a specific table, specify the implementation when creating the table using the table property `write.location-provider.impl`.
+
+Example:
+```sql
+CREATE TABLE hive.default.my_table (
+  id bigint,
+  data string,
+  category string)
+USING iceberg
+OPTIONS (
+  'write.location-provider.impl'='com.my.CustomLocationProvider'
+)
+PARTITIONED BY (category);
+```
+
+### Custom IcebergSource
+Extend `IcebergSource` and provide an implementation that reads from `CustomCatalog`.
+
+Example:
+```java
+public class CustomIcebergSource extends IcebergSource {
+
+  @Override
+  protected Table findTable(DataSourceOptions options, Configuration conf) {
+    Optional<String> path = options.get("path");
+    Preconditions.checkArgument(path.isPresent(), "Cannot open table: path is not set");
+
+    // Read table from CustomCatalog
+    CustomCatalog catalog = new CustomCatalog(conf);
+    TableIdentifier tableIdentifier = TableIdentifier.parse(path.get());
+    return catalog.loadTable(tableIdentifier);
+  }
+}
+```
+
+Register the `CustomIcebergSource` by updating `META-INF/services/org.apache.spark.sql.sources.DataSourceRegister` with its fully qualified name.
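+
+For example, assuming the implementation class is `com.my.custom.CustomIcebergSource` (a placeholder), the services file would contain that single fully qualified class name:
+
+```
+com.my.custom.CustomIcebergSource
+```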
diff --git a/docs/content/docs/api/python-api-intro.md b/docs/content/docs/api/python-api-intro.md
new file mode 100644
index 0000000..11622d3
--- /dev/null
+++ b/docs/content/docs/api/python-api-intro.md
@@ -0,0 +1,180 @@
+---
+weight: 500
+title: "Python API"
+url: python-api-intro
+aliases:
+    - "python/api-intro"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Iceberg Python API
+
+Much of the Python API conforms to the Java API. You can get more info about the Java API [here](../api).
+
+## Catalog
+
+The Catalog interface, like the Java one, provides search and management operations for tables.
+
+To create a catalog:
+
+``` python
+from iceberg.hive import HiveTables
+
+# instantiate Hive Tables
+conf = {"hive.metastore.uris": 'thrift://{hms_host}:{hms_port}'}
+tables = HiveTables(conf)
+```
+
+and to create a table from a catalog:
+
+``` python
+from iceberg.api.schema import Schema
+from iceberg.api.types import TimestampType, DoubleType, StringType, NestedField
+from iceberg.api.partition_spec import PartitionSpecBuilder
+
+schema = Schema(NestedField.optional(1, "DateTime", TimestampType.with_timezone()),
+                NestedField.optional(2, "Bid", DoubleType.get()),
+                NestedField.optional(3, "Ask", DoubleType.get()),
+                NestedField.optional(4, "symbol", StringType.get()))
+partition_spec = PartitionSpecBuilder(schema).add(1, 1000, "DateTime_day", "day").build()
+
+tables.create(schema, "test.test_123", partition_spec)
+```
+
+
+## Tables
+
+The Table interface provides access to table metadata
+
++ schema returns the current table `Schema`
++ spec returns the current table `PartitionSpec`
++ properties returns a map of key-value `TableProperties`
++ currentSnapshot returns the current table `Snapshot`
++ snapshots returns all valid snapshots for the table
++ snapshot(id) returns a specific snapshot by ID
++ location returns the table’s base location
+
+Tables also provide refresh to update the table to the latest version.
+
+### Scanning
+Iceberg table scans start by creating a `TableScan` object with `newScan`.
+
+``` python
+scan = table.new_scan()
+```
+
+To configure a scan, call filter and select on the `TableScan` to get a new `TableScan` with those changes.
+
+``` python
+filtered_scan = scan.filter(Expressions.equal("id", 5))
+```
+
+String expressions can also be passed to the filter method.
+
+``` python
+filtered_scan = scan.filter("id=5")
+```
+
+`Schema` projections can be applied against a `TableScan` by passing a list of column names.
+
+``` python
+filtered_scan = scan.select(["col_1", "col_2", "col_3"])
+```
+
+Because some data types cannot be read using the python library, a convenience method for excluding columns from projection is provided.
+
+``` python
+filtered_scan = scan.select_except(["unsupported_col_1", "unsupported_col_2"])
+```
+
+
+Calls to configuration methods create a new `TableScan` so that each `TableScan` is immutable.
+
+When a scan is configured, `plan_files`, `plan_tasks`, and `schema` are used to return files, tasks, and the read projection.
+
+``` python
+scan = table.new_scan() \
+    .filter("id=5") \
+    .select(["id", "data"])
+
+projection = scan.schema
+for task in scan.plan_tasks():
+    print(task)
+```
+
+## Types
+
+Iceberg data types are located in `iceberg.api.types.types`
+
+### Primitives
+
+Primitive type instances are available from static methods in each type class. Types without parameters use `get`, and types like `DecimalType` use factory methods:
+
+```python
+IntegerType.get()    # int
+DoubleType.get()     # double
+DecimalType.of(9, 2) # decimal(9, 2)
+```
+
+### Nested types
+Structs, maps, and lists are created using factory methods in type classes.
+
+Like struct fields, map keys or values and list elements are tracked as nested fields. Nested fields track [field IDs](https://iceberg.apache.org/evolution/#correctness) and nullability.
+
+Struct fields are created using `NestedField.optional` or `NestedField.required`. Map value and list element nullability is set in the map and list factory methods.
+
+```python
+# struct<1 id: int, 2 data: optional string>
+struct = StructType.of([NestedField.required(1, "id", IntegerType.get()),
+                        NestedField.optional(2, "data", StringType.get())])
+```
+```python
+# map<1 key: int, 2 value: optional string>
+map_var = MapType.of_optional(1, IntegerType.get(),
+                          2, StringType.get())
+```
+```python
+# array<1 element: int>
+list_var = ListType.of_required(1, IntegerType.get())
+```
+
+## Expressions
+Iceberg’s `Expressions` are used to configure table scans. To create `Expressions`, use the factory methods in `Expressions`.
+
+Supported `Predicate` expressions are:
+
++ `is_null`
++ `not_null`
++ `equal`
++ `not_equal`
++ `less_than`
++ `less_than_or_equal`
++ `greater_than`
++ `greater_than_or_equal`
+
+Supported expression `Operations` are:
+
++ `and`
++ `or`
++ `not`
+
+Constant expressions are:
+
++ `always_true`
++ `always_false`
diff --git a/docs/content/docs/api/python-feature-support.md b/docs/content/docs/api/python-feature-support.md
new file mode 100644
index 0000000..3ae2172
--- /dev/null
+++ b/docs/content/docs/api/python-feature-support.md
@@ -0,0 +1,76 @@
+---
+weight: 600
+title: "Python Feature Support"
+url: python-feature-support
+aliases:
+    - "python/feature-support"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Feature Support
+
+The goal is that the python library will provide a functional, performant subset of the java library. The initial focus has been on reading table metadata as well as providing the capability to both plan and execute a scan.
+
+## Feature Comparison
+
+### Metadata
+
+| Operation               | Java  | Python |
+|:------------------------|:-----:|:------:|
+| Get Schema              |    X  |    X   |
+| Get Snapshots           |    X  |    X   |
+| Plan Scan               |    X  |    X   |
+| Plan Scan for Snapshot  |    X  |    X   |
+| Update Current Snapshot |    X  |        |
+| Set Table Properties    |    X  |        |
+| Create Table            |    X  |    X   |
+| Drop Table              |    X  |    X   |
+| Alter Table             |    X  |        |
+
+
+### Read Support
+
+PyArrow is used for reading Parquet files, so read support is limited to what is currently supported in the `pyarrow.parquet` package.
+
+#### Primitive Types
+
+
+| Data Type               | Java | Python |
+|:------------------------|:----:|:------:|
+| BooleanType             |   X  |    X   |
+| DateType                |   X  |    X   |
+| DecimalType             |   X  |    X   |
+| FloatType               |   X  |    X   |
+| IntegerType             |   X  |    X   |
+| LongType                |   X  |    X   |
+| TimeType                |   X  |    X   |
+| TimestampType           |   X  |    X   |
+
+#### Nested Types
+
+| Data Type               | Java | Python |
+|:------------------------|:----:|:------:|
+| ListType of primitives  |   X  |    X   |
+| MapType of primitives   |   X  |    X   |
+| StructType of primitives|   X  |    X   |
+| ListType of Nested Types|   X  |        |
+| MapType of Nested Types |   X  |        |
+
+### Write Support
+
+The Python client does not currently support write capability.
diff --git a/docs/content/docs/api/python-quickstart.md b/docs/content/docs/api/python-quickstart.md
new file mode 100644
index 0000000..db030e6
--- /dev/null
+++ b/docs/content/docs/api/python-quickstart.md
@@ -0,0 +1,67 @@
+---
+weight: 400
+title: "Python Quickstart"
+url: python-quickstart
+aliases:
+    - "python/quickstart"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+
+# Python API Quickstart
+
+## Installation
+
+Iceberg Python is currently in development. For development and testing purposes, the best way to install the library is to perform the following steps:
+```
+git clone https://github.com/apache/iceberg.git
+cd iceberg/python
+pip install -e .
+```
+
+## Testing
+Testing is done using tox. The config can be found in `tox.ini` within the python directory of the Iceberg project.
+
+```
+# simply run tox from within the python dir
+tox
+```
+
+# Examples
+
+## Inspect Table Metadata
+``` python
+
+from iceberg.hive import HiveTables
+
+# instantiate Hive Tables
+conf = {"hive.metastore.uris": 'thrift://{hms_host}:{hms_port}'}
+tables = HiveTables(conf)
+
+# load table
+tbl = tables.load("iceberg_db.iceberg_test_table")
+
+# inspect metadata
+print(tbl.schema())
+print(tbl.spec())
+print(tbl.location())
+
+# get table level record count
+from pprint import pprint
+pprint(int(tbl.current_snapshot().summary.get("total-records")))
+```
diff --git a/docs/content/docs/asf/_index.md b/docs/content/docs/asf/_index.md
new file mode 100644
index 0000000..114d1da
--- /dev/null
+++ b/docs/content/docs/asf/_index.md
@@ -0,0 +1,6 @@
+---
+title: "ASF"
+bookIconImage: ../img/asf.png
+bookCollapseSection: true
+weight: 1200
+---
\ No newline at end of file
diff --git a/docs/content/docs/asf/donate/_index.md b/docs/content/docs/asf/donate/_index.md
new file mode 100644
index 0000000..7a8edb6
--- /dev/null
+++ b/docs/content/docs/asf/donate/_index.md
@@ -0,0 +1,22 @@
+---
+title: "Donate"
+weight: 400
+bookExternalUrlNewWindow: https://www.apache.org/foundation/sponsorship.html
+bookIconFa: fa-external-link
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/asf/events/_index.md b/docs/content/docs/asf/events/_index.md
new file mode 100644
index 0000000..0cc7a3b
--- /dev/null
+++ b/docs/content/docs/asf/events/_index.md
@@ -0,0 +1,22 @@
+---
+title: "Events"
+weight: 500
+bookExternalUrlNewWindow: https://www.apache.org/events/current-event.html
+bookIconFa: fa-external-link
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/asf/license/_index.md b/docs/content/docs/asf/license/_index.md
new file mode 100644
index 0000000..6e111e7
--- /dev/null
+++ b/docs/content/docs/asf/license/_index.md
@@ -0,0 +1,22 @@
+---
+title: "License"
+weight: 100
+bookExternalUrlNewWindow: https://www.apache.org/licenses/
+bookIconFa: fa-external-link
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/asf/security/_index.md b/docs/content/docs/asf/security/_index.md
new file mode 100644
index 0000000..198c81f
--- /dev/null
+++ b/docs/content/docs/asf/security/_index.md
@@ -0,0 +1,22 @@
+---
+title: "Security"
+weight: 200
+bookExternalUrlNewWindow: https://www.apache.org/security/
+bookIconFa: fa-external-link
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/asf/sponsors/_index.md b/docs/content/docs/asf/sponsors/_index.md
new file mode 100644
index 0000000..0b76124
--- /dev/null
+++ b/docs/content/docs/asf/sponsors/_index.md
@@ -0,0 +1,22 @@
+---
+title: "Sponsors"
+weight: 300
+bookExternalUrlNewWindow: https://www.apache.org/foundation/thanks.html
+bookIconFa: fa-external-link
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/community/_index.md b/docs/content/docs/community/_index.md
new file mode 100644
index 0000000..9d68285
--- /dev/null
+++ b/docs/content/docs/community/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-users
+bookCollapseSection: true
+weight: 900
+---
\ No newline at end of file
diff --git a/docs/content/docs/community/blogs.md b/docs/content/docs/community/blogs.md
new file mode 100644
index 0000000..b98184e
--- /dev/null
+++ b/docs/content/docs/community/blogs.md
@@ -0,0 +1,4 @@
+---
+title: "Blogs"
+bookUrlFromBaseURL: /../../blogs
+---
\ No newline at end of file
diff --git a/docs/content/docs/community/join.md b/docs/content/docs/community/join.md
new file mode 100644
index 0000000..8fd449b
--- /dev/null
+++ b/docs/content/docs/community/join.md
@@ -0,0 +1,4 @@
+---
+title: "Join"
+bookUrlFromBaseURL: /../../community
+---
\ No newline at end of file
diff --git a/docs/content/docs/community/talks.md b/docs/content/docs/community/talks.md
new file mode 100644
index 0000000..9c2ee32
--- /dev/null
+++ b/docs/content/docs/community/talks.md
@@ -0,0 +1,4 @@
+---
+title: "Talks"
+bookUrlFromBaseURL: /../../talks
+---
\ No newline at end of file
diff --git a/docs/content/docs/flink/_index.md b/docs/content/docs/flink/_index.md
new file mode 100644
index 0000000..fbcc19e
--- /dev/null
+++ b/docs/content/docs/flink/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconImage: flink-logo.png
+bookFlatSection: true
+weight: 300
+---
\ No newline at end of file
diff --git a/docs/content/docs/flink/flink-connector.md b/docs/content/docs/flink/flink-connector.md
new file mode 100644
index 0000000..7bcc456
--- /dev/null
+++ b/docs/content/docs/flink/flink-connector.md
@@ -0,0 +1,145 @@
+---
+url: flink-connector
+aliases:
+    - "flink/flink-connector"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Flink Connector
+Apache Flink supports creating Iceberg tables directly in Flink SQL without creating an explicit Flink catalog. That means you can create an Iceberg table by specifying the `'connector'='iceberg'` table option in Flink SQL, similar to the usage described in the official Flink [documentation](https://nightlies.apache.org/flink/flink-docs-release-1.13/docs/connectors/table/overview/).
+
+In Flink, the SQL `CREATE TABLE test (..) WITH ('connector'='iceberg', ...)` will create a Flink table in the current Flink catalog (the [GenericInMemoryCatalog](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/table/catalogs/#genericinmemorycatalog) by default),
+which simply maps to the underlying Iceberg table rather than maintaining the Iceberg table directly in the current Flink catalog.
+
+To create a table in Flink SQL using the syntax `CREATE TABLE test (..) WITH ('connector'='iceberg', ...)`, the Flink Iceberg connector provides the following table properties:
+
+* `connector`: Use the constant `iceberg`.
+* `catalog-name`: User-specified catalog name. It's required because the connector doesn't have a default value.
+* `catalog-type`: Defaults to `hive` if no value is specified. The optional values are:
+    * `hive`: The Hive metastore catalog.
+    * `hadoop`: The Hadoop catalog.
+    * `custom`: A customized catalog, see [custom catalog](../custom-catalog) for more details.
+* `catalog-database`: The Iceberg database name in the backend catalog; uses the current Flink database name by default.
+* `catalog-table`: The Iceberg table name in the backend catalog. Defaults to the table name in the Flink `CREATE TABLE` statement.
+
+## Table managed in Hive catalog
+
+Before executing the following SQL, please make sure you've configured the Flink SQL client correctly according to the quick start [document](../flink).
+
+The following SQL will create a Flink table in the current Flink catalog, which maps to the Iceberg table `default_database.iceberg_table` managed in the Iceberg catalog.
+
+```sql
+CREATE TABLE flink_table (
+    id   BIGINT,
+    data STRING
+) WITH (
+    'connector'='iceberg',
+    'catalog-name'='hive_prod',
+    'uri'='thrift://localhost:9083',
+    'warehouse'='hdfs://nn:8020/path/to/warehouse'
+);
+```
+
+If you want to create a Flink table mapping to a different Iceberg table managed in the Hive catalog (such as `hive_db.hive_iceberg_table` in Hive), you can create the Flink table as follows:
+
+```sql
+CREATE TABLE flink_table (
+    id   BIGINT,
+    data STRING
+) WITH (
+    'connector'='iceberg',
+    'catalog-name'='hive_prod',
+    'catalog-database'='hive_db',
+    'catalog-table'='hive_iceberg_table',
+    'uri'='thrift://localhost:9083',
+    'warehouse'='hdfs://nn:8020/path/to/warehouse'
+);
+```
+
+{{< hint info >}}
+The underlying catalog database (`hive_db` in the above example) will be created automatically if it does not exist when writing records into the Flink table.
+{{< /hint >}}
+
+## Table managed in Hadoop catalog
+
+The following SQL will create a Flink table in the current Flink catalog, which maps to the Iceberg table `default_database.flink_table` managed in the Hadoop catalog.
+
+```sql
+CREATE TABLE flink_table (
+    id   BIGINT,
+    data STRING
+) WITH (
+    'connector'='iceberg',
+    'catalog-name'='hadoop_prod',
+    'catalog-type'='hadoop',
+    'warehouse'='hdfs://nn:8020/path/to/warehouse'
+);
+```
+
+## Table managed in custom catalog
+
+The following SQL will create a Flink table in the current Flink catalog, which maps to the Iceberg table `default_database.flink_table` managed in the custom catalog.
+
+```sql
+CREATE TABLE flink_table (
+    id   BIGINT,
+    data STRING
+) WITH (
+    'connector'='iceberg',
+    'catalog-name'='custom_prod',
+    'catalog-type'='custom',
+    'catalog-impl'='com.my.custom.CatalogImpl',
+     -- More table properties for the customized catalog
+    'my-additional-catalog-config'='my-value',
+     ...
+);
+```
+
+Please check sections under the Integrations tab for all custom catalogs.
+
+## A complete example
+
+Take the Hive catalog as an example:
+
+```sql
+CREATE TABLE flink_table (
+    id   BIGINT,
+    data STRING
+) WITH (
+    'connector'='iceberg',
+    'catalog-name'='hive_prod',
+    'uri'='thrift://localhost:9083',
+    'warehouse'='file:///path/to/warehouse'
+);
+
+INSERT INTO flink_table VALUES (1, 'AAA'), (2, 'BBB'), (3, 'CCC');
+
+SET execution.result-mode=tableau;
+SELECT * FROM flink_table;
+
++----+------+
+| id | data |
++----+------+
+|  1 |  AAA |
+|  2 |  BBB |
+|  3 |  CCC |
++----+------+
+3 rows in set
+```
+
+For more details, please refer to the Iceberg [Flink document](../flink).
\ No newline at end of file
diff --git a/docs/content/docs/flink/flink-getting-started.md b/docs/content/docs/flink/flink-getting-started.md
new file mode 100644
index 0000000..edd7bd2
--- /dev/null
+++ b/docs/content/docs/flink/flink-getting-started.md
@@ -0,0 +1,557 @@
+---
+title: "Getting Started"
+weight: 300
+url: flink
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Flink
+
+Apache Iceberg supports both [Apache Flink](https://flink.apache.org/)'s DataStream API and Table API to write records into an Iceberg table. Currently,
+we only integrate Iceberg with Apache Flink 1.11.x.
+
+| Feature support                                                        |  Flink 1.11.0      |  Notes                                                 |
+|------------------------------------------------------------------------|--------------------|--------------------------------------------------------|
+| [SQL create catalog](#creating-catalogs-and-using-catalogs)            | ✔️                 |                                                        |
+| [SQL create database](#create-database)                                | ✔️                 |                                                        |
+| [SQL create table](#create-table)                                      | ✔️                 |                                                        |
+| [SQL create table like](#create-table-like)                            | ✔️                 |                                                        |
+| [SQL alter table](#alter-table)                                        | ✔️                 | Only altering table properties is supported; column and partition key changes are not supported yet |
+| [SQL drop_table](#drop-table)                                          | ✔️                 |                                                        |
+| [SQL select](#querying-with-sql)                                       | ✔️                 | Support both streaming and batch mode                  |
+| [SQL insert into](#insert-into)                                        | ✔️ ️               | Support both streaming and batch mode                  |
+| [SQL insert overwrite](#insert-overwrite)                              | ✔️ ️               |                                                        |
+| [DataStream read](#reading-with-datastream)                            | ✔️ ️               |                                                        |
+| [DataStream append](#appending-data)                                   | ✔️ ️               |                                                        |
+| [DataStream overwrite](#overwrite-data)                                | ✔️ ️               |                                                        |
+| [Metadata tables](#inspecting-tables)                                  |    ️               | Support Java API but does not support Flink SQL        |
+| [Rewrite files action](#rewrite-files-action)                          | ✔️ ️               |                                                        |
+
+## Preparation when using Flink SQL Client
+
+To create Iceberg tables in Flink, we recommend using the [Flink SQL Client](https://ci.apache.org/projects/flink/flink-docs-stable/dev/table/sqlClient.html) because it's easier for users to understand the concepts.
+
+Step.1 Download the Flink 1.11.x binary package from the Apache Flink [download page](https://flink.apache.org/downloads.html). We currently use Scala 2.12 to build the Apache iceberg-flink-runtime jar, so it's recommended to use Flink 1.11 bundled with Scala 2.12.
+
+```bash
+FLINK_VERSION=1.11.1
+SCALA_VERSION=2.12
+APACHE_FLINK_URL=archive.apache.org/dist/flink/
+wget ${APACHE_FLINK_URL}/flink-${FLINK_VERSION}/flink-${FLINK_VERSION}-bin-scala_${SCALA_VERSION}.tgz
+tar xzvf flink-${FLINK_VERSION}-bin-scala_${SCALA_VERSION}.tgz
+```
+
+Step.2 Start a standalone Flink cluster within a Hadoop environment.
+
+```bash
+# HADOOP_HOME is your hadoop root directory after unpacking the binary package.
+export HADOOP_CLASSPATH=`$HADOOP_HOME/bin/hadoop classpath`
+
+# Start the flink standalone cluster
+./bin/start-cluster.sh
+```
+
+Step.3 Start the Flink SQL client.
+
+We've created a separate `flink-runtime` module in the Iceberg project to generate a bundled jar, which can be loaded by the Flink SQL client directly.
+
+To build the `flink-runtime` bundled jar manually, build the `iceberg` project, which will generate the jar under `<iceberg-root-dir>/flink-runtime/build/libs`. You can also download the `flink-runtime` jar from the [Apache official repository](https://repo.maven.apache.org/maven2/org/apache/iceberg/iceberg-flink-runtime/).
+
+```bash
+# HADOOP_HOME is your hadoop root directory after unpacking the binary package.
+export HADOOP_CLASSPATH=`$HADOOP_HOME/bin/hadoop classpath`
+
+./bin/sql-client.sh embedded -j <flink-runtime-directory>/iceberg-flink-runtime-xxx.jar shell
+```
+
+By default, Iceberg includes Hadoop jars for the Hadoop catalog. If we want to use the Hive catalog, we need to load the Hive jars when opening the Flink SQL client. Fortunately, Apache Flink provides a [bundled Hive jar](https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-connector-hive-2.3.6_2.11/1.11.0/flink-sql-connector-hive-2.3.6_2.11-1.11.0.jar) for the SQL client. So we can open the SQL client
+as follows:
+
+```bash
+# HADOOP_HOME is your hadoop root directory after unpacking the binary package.
+export HADOOP_CLASSPATH=`$HADOOP_HOME/bin/hadoop classpath`
+
+# download Iceberg dependency
+ICEBERG_VERSION=0.11.1
+MAVEN_URL=https://repo1.maven.org/maven2
+ICEBERG_MAVEN_URL=${MAVEN_URL}/org/apache/iceberg
+ICEBERG_PACKAGE=iceberg-flink-runtime
+wget ${ICEBERG_MAVEN_URL}/${ICEBERG_PACKAGE}/${ICEBERG_VERSION}/${ICEBERG_PACKAGE}-${ICEBERG_VERSION}.jar
+
+# download the flink-sql-connector-hive-${HIVE_VERSION}_${SCALA_VERSION}-${FLINK_VERSION}.jar
+HIVE_VERSION=2.3.6
+SCALA_VERSION=2.11
+FLINK_VERSION=1.11.0
+FLINK_CONNECTOR_URL=${MAVEN_URL}/org/apache/flink
+FLINK_CONNECTOR_PACKAGE=flink-sql-connector-hive
+wget ${FLINK_CONNECTOR_URL}/${FLINK_CONNECTOR_PACKAGE}-${HIVE_VERSION}_${SCALA_VERSION}/${FLINK_VERSION}/${FLINK_CONNECTOR_PACKAGE}-${HIVE_VERSION}_${SCALA_VERSION}-${FLINK_VERSION}.jar
+
+# open the SQL client.
+/path/to/bin/sql-client.sh embedded \
+    -j ${ICEBERG_PACKAGE}-${ICEBERG_VERSION}.jar \
+    -j ${FLINK_CONNECTOR_PACKAGE}-${HIVE_VERSION}_${SCALA_VERSION}-${FLINK_VERSION}.jar \
+    shell
+```
+## Preparation when using Flink's Python API
+
+Install the Apache Flink dependency using `pip`:
+```bash
+pip install apache-flink==1.11.1
+```
+
+In order for `pyflink` to function properly, it needs to have access to all Hadoop jars. For `pyflink`
+we need to copy those Hadoop jars to the installation directory of `pyflink`, which can be found under
+`<PYTHON_ENV_INSTALL_DIR>/site-packages/pyflink/lib/` (see also a mention of this on
+the [Flink ML](http://mail-archives.apache.org/mod_mbox/flink-user/202105.mbox/%3C3D98BDD2-89B1-42F5-B6F4-6C06A038F978%40gmail.com%3E)).
+We can use the following short Python script to copy all Hadoop jars (you need to make sure that `HADOOP_HOME`
+points to your Hadoop installation):
+
+```python
+import os
+import shutil
+import site
+
+
+def copy_all_hadoop_jars_to_pyflink():
+    if not os.getenv("HADOOP_HOME"):
+        raise Exception("The HADOOP_HOME env var must be set and point to a valid Hadoop installation")
+
+    jar_files = []
+
+    def find_pyflink_lib_dir():
+        for dir in site.getsitepackages():
+            package_dir = os.path.join(dir, "pyflink", "lib")
+            if os.path.exists(package_dir):
+                return package_dir
+        return None
+
+    for root, _, files in os.walk(os.getenv("HADOOP_HOME")):
+        for file in files:
+            if file.endswith(".jar"):
+                jar_files.append(os.path.join(root, file))
+
+    pyflink_lib_dir = find_pyflink_lib_dir()
+
+    num_jar_files = len(jar_files)
+    print(f"Copying {num_jar_files} Hadoop jar files to pyflink's lib directory at {pyflink_lib_dir}")
+    for jar in jar_files:
+        shutil.copy(jar, pyflink_lib_dir)
+
+
+if __name__ == '__main__':
+    copy_all_hadoop_jars_to_pyflink()
+```
+
+Once the script finishes, you should see output similar to:
+```
+Copying 645 Hadoop jar files to pyflink's lib directory at <PYTHON_DIR>/lib/python3.8/site-packages/pyflink/lib
+```
+
+Now we need to provide a `file://` path to the `iceberg-flink-runtime` jar, which we can get either by building the project
+and looking at `<iceberg-root-dir>/flink-runtime/build/libs`, or by downloading it from the [Apache official repository](https://repo.maven.apache.org/maven2/org/apache/iceberg/iceberg-flink-runtime/).
+Third-party libs can be added to `pyflink` via `env.add_jars("file:///my/jar/path/connector.jar")` / `table_env.get_config().get_configuration().set_string("pipeline.jars", "file:///my/jar/path/connector.jar")`, which is also mentioned in the official [docs](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/python/dependency_management/).
+In our example we're using `env.add_jars(..)` as shown below:
+
+```python
+import os
+
+from pyflink.datastream import StreamExecutionEnvironment
+
+env = StreamExecutionEnvironment.get_execution_environment()
+iceberg_flink_runtime_jar = os.path.join(os.getcwd(), "iceberg-flink-runtime-{{% icebergVersion %}}.jar")
+
+env.add_jars("file://{}".format(iceberg_flink_runtime_jar))
+```
+
+At this point, we can create a `StreamTableEnvironment` and execute Flink SQL statements.
+The example below shows how to create a custom catalog via the Python Table API:
+```python
+from pyflink.table import StreamTableEnvironment
+table_env = StreamTableEnvironment.create(env)
+table_env.execute_sql("CREATE CATALOG my_catalog WITH ("
+                      "'type'='iceberg', "
+                      "'catalog-impl'='com.my.custom.CatalogImpl', "
+                      "'my-additional-catalog-config'='my-value')")
+```
+
+For more details, please refer to the [Python Table API](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/dev/python/table/intro_to_table_api/).
+
+## Creating catalogs and using catalogs
+
+Flink 1.11 supports creating catalogs using Flink SQL.
+
+### Catalog Configuration
+
+A catalog is created and named by executing the following query (replace `<catalog_name>` with your catalog name and
+`<config_key>`=`<config_value>` with catalog implementation config):   
+
+```sql
+CREATE CATALOG <catalog_name> WITH (
+  'type'='iceberg',
+  '<config_key>'='<config_value>'
+); 
+```
+
+The following properties can be set globally and are not limited to a specific catalog implementation:
+
+* `type`: Must be `iceberg`. (Required)
+* `catalog-type`: `hive` or `hadoop` for built-in catalogs, or left unset for custom catalog implementations using `catalog-impl`. (Optional)
+* `catalog-impl`: The fully-qualified class name of a custom catalog implementation. Must be set if `catalog-type` is unset. (Optional)
+* `property-version`: Version number to describe the property version. This property can be used for backwards compatibility in case the property format changes. The current property version is `1`. (Optional)
+* `cache-enabled`: Whether to enable catalog caching. Default value is `true`. (Optional)
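+
+For example, a minimal sketch of a Hadoop catalog with catalog caching disabled; the catalog name and warehouse path are only placeholders:
+
+```sql
+CREATE CATALOG uncached_catalog WITH (
+  'type'='iceberg',
+  'catalog-type'='hadoop',
+  'warehouse'='hdfs://nn:8020/warehouse/path',
+  'cache-enabled'='false'
+);
+```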
+
+### Hive catalog
+
+This creates an Iceberg catalog named `hive_catalog` that is configured with `'catalog-type'='hive'` and loads tables from a Hive metastore:
+
+```sql
+CREATE CATALOG hive_catalog WITH (
+  'type'='iceberg',
+  'catalog-type'='hive',
+  'uri'='thrift://localhost:9083',
+  'clients'='5',
+  'property-version'='1',
+  'warehouse'='hdfs://nn:8020/warehouse/path'
+);
+```
+
+The following properties can be set if using the Hive catalog:
+
+* `uri`: The Hive metastore's thrift URI. (Required)
+* `clients`: The Hive metastore client pool size. Default value is 2. (Optional)
+* `warehouse`: The Hive warehouse location. Users should specify this path unless they either set `hive-conf-dir` to a location containing a `hive-site.xml` configuration file or add a correct `hive-site.xml` to the classpath.
+* `hive-conf-dir`: Path to a directory containing a `hive-site.xml` configuration file which will be used to provide custom Hive configuration values. The value of `hive.metastore.warehouse.dir` from `<hive-conf-dir>/hive-site.xml` (or the Hive configuration file on the classpath) will be overwritten with the `warehouse` value if both `hive-conf-dir` and `warehouse` are set when creating the Iceberg catalog.
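+
+For example, a minimal sketch of a Hive catalog that picks up its warehouse location from the `hive-site.xml` in `hive-conf-dir` rather than an explicit `warehouse` property; the catalog name and paths are only placeholders:
+
+```sql
+CREATE CATALOG hive_conf_catalog WITH (
+  'type'='iceberg',
+  'catalog-type'='hive',
+  'uri'='thrift://localhost:9083',
+  'hive-conf-dir'='/path/to/hive-conf'
+);
+```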
+
+### Hadoop catalog
+
+Iceberg also supports a directory-based catalog in HDFS that can be configured using `'catalog-type'='hadoop'`:
+
+```sql
+CREATE CATALOG hadoop_catalog WITH (
+  'type'='iceberg',
+  'catalog-type'='hadoop',
+  'warehouse'='hdfs://nn:8020/warehouse/path',
+  'property-version'='1'
+);
+```
+
+The following properties can be set if using the Hadoop catalog:
+
+* `warehouse`: The HDFS directory to store metadata files and data files. (Required)
+
+Execute the SQL command `USE CATALOG hive_catalog` to set the current catalog, for example:
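+
+```sql
+USE CATALOG hive_catalog;
+```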
+
+### Custom catalog
+
+Flink also supports loading a custom Iceberg `Catalog` implementation by specifying the `catalog-impl` property. Here is an example:
+
+```sql
+CREATE CATALOG my_catalog WITH (
+  'type'='iceberg',
+  'catalog-impl'='com.my.custom.CatalogImpl',
+  'my-additional-catalog-config'='my-value'
+);
+```
+
+### Create through YAML config
+
+Catalogs can be registered in `sql-client-defaults.yaml` before starting the SQL client. Here is an example:
+
+```yaml
+catalogs: 
+  - name: my_catalog
+    type: iceberg
+    catalog-type: hadoop
+    warehouse: hdfs://nn:8020/warehouse/path
+```
+
+## DDL commands
+
+### `CREATE DATABASE`
+
+By default, Iceberg uses the `default` database in Flink. Use the following example to create a separate database if you don't want to create tables under the `default` database:
+
+```sql
+CREATE DATABASE iceberg_db;
+USE iceberg_db;
+```
+
+### `CREATE TABLE`
+
+```sql
+CREATE TABLE `hive_catalog`.`default`.`sample` (
+    id BIGINT COMMENT 'unique id',
+    data STRING
+);
+```
+
+Table create commands support the most commonly used [Flink create clauses](https://ci.apache.org/projects/flink/flink-docs-release-1.11/dev/table/sql/create.html#create-table), including:
+
+* `PARTITIONED BY (column1, column2, ...)` to configure partitioning; Apache Flink does not yet support hidden partitioning.
+* `COMMENT 'table document'` to set a table description.
+* `WITH ('key'='value', ...)` to set [table configuration](../configuration) which will be stored in Apache Iceberg table properties, as shown in the sketch below.
+
+Currently, computed columns, primary keys, and watermark definitions are not supported.
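+
+For example, a minimal sketch that sets a table comment and an Iceberg table property through the `WITH` clause; the table name `sample_props` is only illustrative:
+
+```sql
+CREATE TABLE `hive_catalog`.`default`.`sample_props` (
+    id BIGINT COMMENT 'unique id',
+    data STRING
+) COMMENT 'table documentation'
+WITH ('write.format.default'='avro');
+```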
+
+### `PARTITIONED BY`
+
+To create a partitioned table, use `PARTITIONED BY`:
+
+```sql
+CREATE TABLE `hive_catalog`.`default`.`sample` (
+    id BIGINT COMMENT 'unique id',
+    data STRING
+) PARTITIONED BY (data);
+```
+
+Apache Iceberg supports hidden partitioning, but Apache Flink doesn't support partitioning by a function on columns, so there is currently no way to support hidden partitioning in Flink DDL. Apache Flink DDL support will be improved in the future.
+
+### `CREATE TABLE LIKE`
+
+To create a table with the same schema, partitioning, and table properties as another table, use `CREATE TABLE LIKE`.
+
+```sql
+CREATE TABLE `hive_catalog`.`default`.`sample` (
+    id BIGINT COMMENT 'unique id',
+    data STRING
+);
+
+CREATE TABLE  `hive_catalog`.`default`.`sample_like` LIKE `hive_catalog`.`default`.`sample`;
+```
+
+For more details, refer to the [Flink `CREATE TABLE` documentation](https://ci.apache.org/projects/flink/flink-docs-release-1.11/dev/table/sql/create.html#create-table).
+
+
+### `ALTER TABLE`
+
+In Flink 1.11, Iceberg only supports altering table properties.
+
+```sql
+ALTER TABLE `hive_catalog`.`default`.`sample` SET ('write.format.default'='avro')
+```
+
+### `ALTER TABLE .. RENAME TO`
+
+```sql
+ALTER TABLE `hive_catalog`.`default`.`sample` RENAME TO `hive_catalog`.`default`.`new_sample`;
+```
+
+### `DROP TABLE`
+
+To delete a table, run:
+
+```sql
+DROP TABLE `hive_catalog`.`default`.`sample`;
+```
+
+## Querying with SQL
+
+Iceberg supports both streaming and batch reads in Flink. Execute the following SQL command to switch the execution type from 'streaming' mode to 'batch' mode, and vice versa:
+
+```sql
+-- Execute the flink job in streaming mode for current session context
+SET execution.type = streaming
+
+-- Execute the flink job in batch mode for current session context
+SET execution.type = batch
+```
+
+### Flink batch read
+
+To check all the rows in an Iceberg table by submitting a Flink __batch__ job, execute the following statements:
+
+```sql
+-- Execute the flink job in batch mode for current session context
+SET execution.type = batch;
+SELECT * FROM sample;
+```
+
+### Flink streaming read
+
+Iceberg supports processing incremental data in Flink streaming jobs, starting from a historical snapshot-id:
+
+```sql
+-- Submit the flink job in streaming mode for current session.
+SET execution.type = streaming;
+
+-- Enable this switch because the streaming read SQL provides job options through Flink SQL hints.
+SET table.dynamic-table-options.enabled=true;
+
+-- Read all the records from the iceberg current snapshot, and then read incremental data starting from that snapshot.
+SELECT * FROM sample /*+ OPTIONS('streaming'='true', 'monitor-interval'='1s')*/ ;
+
+-- Read all incremental data starting from the snapshot-id '3821550127947089987' (records from this snapshot will be excluded).
+SELECT * FROM sample /*+ OPTIONS('streaming'='true', 'monitor-interval'='1s', 'start-snapshot-id'='3821550127947089987')*/ ;
+```
+
+These are the options that can be set in Flink SQL hints for a streaming job:
+
+* `monitor-interval`: time interval for continuously monitoring newly committed data files (default value: '1s').
+* `start-snapshot-id`: the snapshot id that the streaming job starts from.
+
+## Writing with SQL
+
+Iceberg supports both `INSERT INTO` and `INSERT OVERWRITE` in Flink 1.11.
+
+### `INSERT INTO`
+
+To append new data to a table with a Flink streaming job, use `INSERT INTO`:
+
+```sql
+INSERT INTO `hive_catalog`.`default`.`sample` VALUES (1, 'a');
+INSERT INTO `hive_catalog`.`default`.`sample` SELECT id, data from other_kafka_table;
+```
+
+### `INSERT OVERWRITE`
+
+To replace data in the table with the result of a query, use `INSERT OVERWRITE` in a batch job (Flink streaming jobs do not support `INSERT OVERWRITE`). Overwrites are atomic operations for Iceberg tables.
+
+Partitions that have rows produced by the `SELECT` query will be replaced, for example:
+
+```sql
+INSERT OVERWRITE sample VALUES (1, 'a');
+```
+
+Iceberg also supports overwriting a given partition with the `SELECT` values:
+
+```sql
+INSERT OVERWRITE `hive_catalog`.`default`.`sample` PARTITION(data='a') SELECT 6;
+```
+
+For a partitioned Iceberg table, when all of the partition columns are given values in the `PARTITION` clause, the insert goes into a static partition. If only some of the partition columns (a prefix of all the partition columns) are given values in the `PARTITION` clause, the query result is written into a dynamic partition.
+For an unpartitioned Iceberg table, its data will be completely overwritten by `INSERT OVERWRITE`.
+
+## Reading with DataStream
+
+Iceberg supports streaming and batch reads through the Java API.
+
+### Batch Read
+
+This example reads all records from an Iceberg table and prints them to stdout in a Flink batch job:
+
+```java
+StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
+TableLoader tableLoader = TableLoader.fromHadoopTable("hdfs://nn:8020/warehouse/path");
+DataStream<RowData> batch = FlinkSource.forRowData()
+     .env(env)
+     .tableLoader(tableLoader)
+     .streaming(false)
+     .build();
+
+// Print all records to stdout.
+batch.print();
+
+// Submit and execute this batch read job.
+env.execute("Test Iceberg Batch Read");
+```
+
+### Streaming read
+
+This example reads incremental records starting from snapshot-id '3821550127947089987' and prints them to stdout in a Flink streaming job:
+
+```java
+StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
+TableLoader tableLoader = TableLoader.fromHadoopTable("hdfs://nn:8020/warehouse/path");
+DataStream<RowData> stream = FlinkSource.forRowData()
+     .env(env)
+     .tableLoader(tableLoader)
+     .streaming(true)
+     .startSnapshotId(3821550127947089987L)
+     .build();
+
+// Print all records to stdout.
+stream.print();
+
+// Submit and execute this streaming read job.
+env.execute("Test Iceberg Batch Read");
+```
+
+There are other options that can be set via the Java API; see [FlinkSource#Builder](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/flink/source/FlinkSource.html) for details.
+
+## Writing with DataStream
+
+Iceberg supports writing to an Iceberg table from different DataStream inputs.
+
+
+### Appending data
+
+Iceberg supports writing `DataStream<RowData>` and `DataStream<Row>` to the sink Iceberg table natively.
+
+```java
+StreamExecutionEnvironment env = ...;
+
+DataStream<RowData> input = ... ;
+Configuration hadoopConf = new Configuration();
+TableLoader tableLoader = TableLoader.fromHadoopTable("hdfs://nn:8020/warehouse/path", hadoopConf);
+
+FlinkSink.forRowData(input)
+    .tableLoader(tableLoader)
+    .build();
+
+env.execute("Test Iceberg DataStream");
+```
+
+The Iceberg API also allows users to write a generic `DataStream<T>` to an Iceberg table; more examples can be found in this [unit test](https://github.com/apache/iceberg/blob/master/flink/src/test/java/org/apache/iceberg/flink/sink/TestFlinkIcebergSink.java).
+
+### Overwrite data
+
+To dynamically overwrite data in an existing Iceberg table, set the `overwrite` flag in the FlinkSink builder.
+
+```java
+StreamExecutionEnvironment env = ...;
+
+DataStream<RowData> input = ... ;
+Configuration hadoopConf = new Configuration();
+TableLoader tableLoader = TableLoader.fromHadoopTable("hdfs://nn:8020/warehouse/path", hadoopConf);
+
+FlinkSink.forRowData(input)
+    .tableLoader(tableLoader)
+    .overwrite(true)
+    .build();
+
+env.execute("Test Iceberg DataStream");
+```
+
+## Inspecting tables
+
+Iceberg does not support inspecting tables in Flink SQL yet; use [Iceberg's Java API](../api) to read Iceberg metadata and get table information.
+
+## Rewrite files action
+
+Iceberg provides an API to rewrite small files into large files by submitting a Flink batch job. The behavior of this Flink action is the same as Spark's [rewriteDataFiles](../maintenance/#compact-data-files).
+
+```java
+import org.apache.iceberg.flink.actions.Actions;
+
+TableLoader tableLoader = TableLoader.fromHadoopTable("hdfs://nn:8020/warehouse/path");
+Table table = tableLoader.loadTable();
+RewriteDataFilesActionResult result = Actions.forTable(table)
+        .rewriteDataFiles()
+        .execute();
+```
+
+For more details about the options of the rewrite files action, see [RewriteDataFilesAction](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/flink/actions/RewriteDataFilesAction.html).
+
+## Future improvement
+
+There are some features that are not yet supported in the current Flink Iceberg integration:
+
+* Creating an Iceberg table with hidden partitioning is not supported. See this [discussion](http://mail-archives.apache.org/mod_mbox/flink-dev/202008.mbox/%3cCABi+2jQCo3MsOa4+ywaxV5J-Z8TGKNZDX-pQLYB-dG+dVUMiMw@mail.gmail.com%3e) on the Flink mailing list.
+* Creating an Iceberg table with computed columns is not supported.
+* Creating an Iceberg table with a watermark is not supported.
+* Adding, removing, renaming, and changing columns are not supported. [FLINK-19062](https://issues.apache.org/jira/browse/FLINK-19062) is tracking this.
diff --git a/docs/content/docs/format/_index.md b/docs/content/docs/format/_index.md
new file mode 100644
index 0000000..c7bf591
--- /dev/null
+++ b/docs/content/docs/format/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-object-ungroup
+bookCollapseSection: true
+weight: 900
+---
\ No newline at end of file
diff --git a/docs/content/docs/format/spec.md b/docs/content/docs/format/spec.md
new file mode 100644
index 0000000..21947be
--- /dev/null
+++ b/docs/content/docs/format/spec.md
@@ -0,0 +1,4 @@
+---
+title: "Spec"
+bookUrlFromBaseURL: /../../spec
+---
\ No newline at end of file
diff --git a/docs/content/docs/format/terms.md b/docs/content/docs/format/terms.md
new file mode 100644
index 0000000..86761cd
--- /dev/null
+++ b/docs/content/docs/format/terms.md
@@ -0,0 +1,4 @@
+---
+title: "Terms"
+bookUrlFromBaseURL: /../../terms
+---
\ No newline at end of file
diff --git a/docs/content/docs/hive/_index.md b/docs/content/docs/hive/_index.md
new file mode 100644
index 0000000..77b32e7
--- /dev/null
+++ b/docs/content/docs/hive/_index.md
@@ -0,0 +1,350 @@
+---
+bookIconImage: ../img/hive-logo.png
+bookFlatSection: true
+url: hive
+weight: 400
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Hive
+
+Iceberg supports reading and writing Iceberg tables through [Hive](https://hive.apache.org) by using a [StorageHandler](https://cwiki.apache.org/confluence/display/Hive/StorageHandlers).
+Here is the current compatibility matrix for Iceberg Hive support: 
+
+| Feature                  | Hive 2.x               | Hive 3.1.2             |
+| ------------------------ | ---------------------- | ---------------------- |
+| CREATE EXTERNAL TABLE    | ✔️                     | ✔️                     |
+| CREATE TABLE             | ✔️                     | ✔️                     |
+| DROP TABLE               | ✔️                     | ✔️                     |
+| SELECT                   | ✔️ (MapReduce and Tez) | ✔️ (MapReduce and Tez) |
+| INSERT INTO              | ✔️ (MapReduce only)️    | ✔️ (MapReduce only)    |
+
+## Enabling Iceberg support in Hive
+
+### Loading runtime jar
+
+To enable Iceberg support in Hive, the `HiveIcebergStorageHandler` and supporting classes need to be made available on Hive's classpath. 
+These are provided by the `iceberg-hive-runtime` jar file. 
+For example, if using the Hive shell, this can be achieved by issuing a statement like so:
+
+```
+add jar /path/to/iceberg-hive-runtime.jar;
+```
+
+There are many other ways to achieve this, including adding the jar file to Hive's auxiliary classpath so it is available by default.
+Please refer to Hive's documentation for more information.
+
+### Enabling support
+
+If the Iceberg storage handler is not in Hive's classpath, then Hive cannot load or update the metadata for an Iceberg table when the storage handler is set.
+To avoid the appearance of broken tables in Hive, Iceberg will not add the storage handler to a table unless Hive support is enabled.
+The storage handler is kept in sync (added or removed) every time Hive engine support for the table is updated, i.e. turned on or off in the table properties.
+There are two ways to enable Hive support: globally in Hadoop Configuration and per-table using a table property.
+
+#### Hadoop configuration
+
+To enable Hive support globally for an application, set `iceberg.engine.hive.enabled=true` in its Hadoop configuration. 
+For example, setting this in the `hive-site.xml` loaded by Spark will enable the storage handler for all tables created by Spark.
+
+{{< hint danger >}}
+Starting with Apache Iceberg `0.11.0`, when using Hive with Tez you also have to disable vectorization (`hive.vectorized.execution.enabled=false`).
+{{< /hint >}}
+
+#### Table property configuration
+
+Alternatively, the property `engine.hive.enabled` can be set to `true` and added to the table properties when creating the Iceberg table. 
+Here is an example of doing it programmatically:
+
+```java
+Catalog catalog = ...;
+Map<String, String> tableProperties = Maps.newHashMap();
+tableProperties.put(TableProperties.ENGINE_HIVE_ENABLED, "true"); // engine.hive.enabled=true
+catalog.createTable(tableId, schema, spec, tableProperties);
+```
+
+The table level configuration overrides the global Hadoop configuration.
+
+## Catalog Management
+
+### Global Hive catalog
+
+From the Hive engine's perspective, there is only one global data catalog that is defined in the Hadoop configuration in the runtime environment.
+In contrast, Iceberg supports multiple different data catalog types such as Hive, Hadoop, AWS Glue, or custom catalog implementations.
+Iceberg also allows loading a table directly based on its path in the file system. Those tables do not belong to any catalog.
+Users might want to read these cross-catalog and path-based tables through the Hive engine for use cases like join.
+
+To support this, a table in the Hive metastore can represent three different ways of loading an Iceberg table,
+depending on the table's `iceberg.catalog` property:
+
+1. The table will be loaded using a `HiveCatalog` that corresponds to the metastore configured in the Hive environment if no `iceberg.catalog` is set
+2. The table will be loaded using a custom catalog if `iceberg.catalog` is set to a catalog name (see below)
+3. The table can be loaded directly using the table's root location if `iceberg.catalog` is set to `location_based_table`
+
+For cases 2 and 3 above, users can create an overlay of an Iceberg table in the Hive metastore,
+so that different table types can work together in the same Hive environment.
+See [CREATE EXTERNAL TABLE](#create-external-table) and [CREATE TABLE](#create-table) for more details.
+
+### Custom Iceberg catalogs
+
+To globally register different catalogs, set the following Hadoop configurations:
+
+| Config Key                                    | Description                                            |
+| --------------------------------------------- | ------------------------------------------------------ |
+| iceberg.catalog.<catalog_name\>.type          | type of catalog: `hive`, `hadoop`, or left unset if using a custom catalog  |
+| iceberg.catalog.<catalog_name\>.catalog-impl  | catalog implementation, must not be null if type is empty |
+| iceberg.catalog.<catalog_name\>.<key\>        | any config key and value pairs for the catalog         |
+
+Here are some examples using Hive CLI:
+
+Register a `HiveCatalog` called `another_hive`:
+
+```
+SET iceberg.catalog.another_hive.type=hive;
+SET iceberg.catalog.another_hive.uri=thrift://example.com:9083;
+SET iceberg.catalog.another_hive.clients=10;
+SET iceberg.catalog.another_hive.warehouse=hdfs://example.com:8020/warehouse;
+```
+
+Register a `HadoopCatalog` called `hadoop`:
+
+```
+SET iceberg.catalog.hadoop.type=hadoop;
+SET iceberg.catalog.hadoop.warehouse=hdfs://example.com:8020/warehouse;
+```
+
+Register an AWS `GlueCatalog` called `glue`:
+
+```
+SET iceberg.catalog.glue.catalog-impl=org.apache.iceberg.aws.GlueCatalog;
+SET iceberg.catalog.glue.warehouse=s3://my-bucket/my/key/prefix;
+SET iceberg.catalog.glue.lock-impl=org.apache.iceberg.aws.glue.DynamoLockManager;
+SET iceberg.catalog.glue.lock.table=myGlueLockTable;
+```
+
+## DDL Commands
+
+### CREATE EXTERNAL TABLE
+
+The `CREATE EXTERNAL TABLE` command is used to overlay a Hive table "on top of" an existing Iceberg table. 
+Iceberg tables are created using either a [`Catalog`](../javadoc/master/index.html?org/apache/iceberg/catalog/Catalog.html),
+or an implementation of the [`Tables`](../javadoc/master/index.html?org/apache/iceberg/Tables.html) interface,
+and Hive needs to be configured accordingly to operate on these different types of table.
+
+#### Hive catalog tables
+
+As described before, tables created by the `HiveCatalog` with the Hive engine feature enabled are directly visible to the Hive engine, so there is no need to create an overlay.
+
+#### Custom catalog tables
+
+For a table in a registered catalog, specify the catalog name in the statement using table property `iceberg.catalog`.
+For example, the SQL below creates an overlay for a table in a `hadoop` type catalog named `hadoop_cat`:
+
+```sql
+SET iceberg.catalog.hadoop_cat.type=hadoop;
+SET iceberg.catalog.hadoop_cat.warehouse=hdfs://example.com:8020/hadoop_cat;
+
+CREATE EXTERNAL TABLE database_a.table_a
+STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
+TBLPROPERTIES ('iceberg.catalog'='hadoop_cat');
+```
+
+When `iceberg.catalog` is missing from both table properties and the global Hadoop configuration, `HiveCatalog` will be used as default.
+
+#### Path-based Hadoop tables
+
+Iceberg tables created using `HadoopTables` are stored entirely in a directory in a filesystem like HDFS.
+These tables are considered to have no catalog. 
+To indicate that, set `iceberg.catalog` property to `location_based_table`. For example:
+
+```sql
+CREATE EXTERNAL TABLE table_a 
+STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' 
+LOCATION 'hdfs://some_bucket/some_path/table_a'
+TBLPROPERTIES ('iceberg.catalog'='location_based_table');
+```
+
+### CREATE TABLE
+
+Hive also supports directly creating a new Iceberg table through `CREATE TABLE` statement. For example:
+
+```sql
+CREATE TABLE database_a.table_a (
+  id bigint, name string
+) PARTITIONED BY (
+  dept string
+) STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler';
+```
+
+{{< hint info >}}
+To Hive, the table appears to be unpartitioned although the underlying Iceberg table is partitioned.
+{{< /hint >}}
+
+{{< hint info >}}
+Due to the limitation of Hive `PARTITIONED BY` syntax, if you use Hive `CREATE TABLE`,
+currently you can only partition by columns, which is translated to the Iceberg identity partition transform.
+You cannot partition by other Iceberg partition transforms such as `days(timestamp)`.
+To create a table with all partition transforms, you need to create the table with another engine such as Spark or Flink.
+{{< /hint >}}
+
+#### Custom catalog table
+
+You can also create a new table that is managed by a custom catalog. 
+For example, the following code creates a table in a custom Hadoop catalog:
+
+```sql
+SET iceberg.catalog.hadoop_cat.type=hadoop;
+SET iceberg.catalog.hadoop_cat.warehouse=hdfs://example.com:8020/hadoop_cat;
+
+CREATE TABLE database_a.table_a (
+  id bigint, name string
+) PARTITIONED BY (
+  dept string
+) STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
+TBLPROPERTIES ('iceberg.catalog'='hadoop_cat');
+```
+
+{{< hint danger >}}
+If the table to create already exists in the custom catalog, this will create a managed overlay table.
+This means technically you can omit the `EXTERNAL` keyword when creating an overlay table.
+However, this is **not recommended** because creating managed overlay tables could pose a risk
+to the shared data files in case of accidental drop table commands from the Hive side, 
+which would unintentionally remove all the data in the table.
+{{< /hint >}}
+
+### DROP TABLE
+
+Tables can be dropped using the `DROP TABLE` command:
+
+```sql
+DROP TABLE [IF EXISTS] table_name [PURGE];
+```
+
+You can configure purge behavior through global Hadoop configuration or Hive metastore table properties:
+
+| Config key                  | Default                    | Description                                                     |
+| ----------------------------| ---------------------------| --------------------------------------------------------------- |
+| external.table.purge        | true                       | if all data and metadata should be purged in a table by default |
+
+Each Iceberg table's default purge behavior can also be configured through Iceberg table properties:
+
+| Property                    | Default                    | Description                                                       |
+| ----------------------------| ---------------------------| ----------------------------------------------------------------- |
+| gc.enabled                  | true                       | if all data and metadata should be purged in the table by default |
+
+When changing `gc.enabled` on the Iceberg table via `UpdateProperties`, `external.table.purge` is also updated on the HMS table accordingly.
+When setting `external.table.purge` as a table property during Hive `CREATE TABLE`, `gc.enabled` is pushed down accordingly to the Iceberg table properties, as shown in the sketch below.
+This makes sure that the two properties are always consistent at the table level between Hive and Iceberg.
+
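+For example, a minimal sketch that creates an Iceberg table whose data and metadata files are kept when the table is dropped; the table name `table_b` is only illustrative:
+
+```sql
+CREATE TABLE database_a.table_b (
+  id bigint, name string
+) STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
+TBLPROPERTIES ('external.table.purge'='false');
+
+-- later, dropping the table removes the catalog entry but does not purge the data and metadata files
+DROP TABLE database_a.table_b;
+```
+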
+{{< hint danger >}}
+Changing `external.table.purge` via Hive `ALTER TABLE SET TBLPROPERTIES` does not update `gc.enabled` on the Iceberg table. 
+This is a limitation on Hive 3.1.2 because the `HiveMetaHook` doesn't have all the hooks for alter tables yet.
+{{< /hint >}}
+
+## Querying with SQL
+
+Here are the features highlights for Iceberg Hive read support:
+
+1. **Predicate pushdown**: Pushdown of the Hive SQL `WHERE` clause has been implemented so that these filters are used at the Iceberg `TableScan` level as well as by the Parquet and ORC Readers.
+2. **Column projection**: Columns from the Hive SQL `SELECT` clause are projected down to the Iceberg readers to reduce the number of columns read.
+3. **Hive query engines**: Both the MapReduce and Tez query execution engines are supported.
+
+### Configurations
+
+Here are the Hadoop configurations that one can adjust for the Hive reader:
+
+| Config key                   | Default                 | Description                                            |
+| ---------------------------- | ----------------------- | ------------------------------------------------------ |
+| iceberg.mr.reuse.containers  | false                   | if Avro reader should reuse containers                 |
+| iceberg.mr.case.sensitive    | true                    | if the query is case-sensitive                         |
+
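+For example, these can be adjusted per Hive session before running a query; the values shown are only illustrative:
+
+```sql
+SET iceberg.mr.reuse.containers=true;
+SET iceberg.mr.case.sensitive=false;
+```
+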
+### SELECT
+
+You should now be able to issue Hive SQL `SELECT` queries and see the results returned from the underlying Iceberg table, for example:
+
+```sql
+SELECT * from table_a;
+```
+
+## Writing with SQL
+
+### Configurations
+
+Here are the Hadoop configurations that one can adjust for the Hive writer:
+
+| Config key                                        | Default                                  | Description                                            |
+| ------------------------------------------------- | ---------------------------------------- | ------------------------------------------------------ |
+| iceberg.mr.commit.table.thread.pool.size          | 10                                       | the number of threads of a shared thread pool to execute parallel commits for output tables |
+| iceberg.mr.commit.file.thread.pool.size           | 10                                       | the number of threads of a shared thread pool to execute parallel commits for files in each output table |
+
+### INSERT INTO
+
+Hive supports the standard single-table `INSERT INTO` operation:
+
+```sql
+INSERT INTO table_a VALUES ('a', 1);
+INSERT INTO table_a SELECT ...;
+```
+
+Multi-table insert is also supported, but it is not atomic; writes are committed one table at a time. Partial changes will be visible during the commit process and failures can leave partial changes committed. Changes within a single table will remain atomic.
+
+Here is an example of inserting into multiple tables at once in Hive SQL:
+```sql
+FROM customers
+    INSERT INTO target1 SELECT customer_id, first_name
+    INSERT INTO target2 SELECT last_name, customer_id;
+```
+
+
+## Type compatibility
+
+Hive and Iceberg support different sets of types. Iceberg can perform type conversion automatically, but not for all combinations,
+so you may want to understand how Iceberg converts types before designing the column types in your tables.
+You can enable auto-conversion through the Hadoop configuration (not enabled by default):
+
+| Config key                               | Default                     | Description                                         |
+| -----------------------------------------| --------------------------- | --------------------------------------------------- |
+| iceberg.mr.schema.auto.conversion        | false                       | if Hive should perform type auto-conversion         |
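+
+For example, a minimal sketch that turns on auto-conversion for the current Hive session:
+
+```sql
+SET iceberg.mr.schema.auto.conversion=true;
+```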
+
+### Hive type to Iceberg type
+
+This type conversion table describes how Hive types are converted to Iceberg types.
+The conversion applies both when creating an Iceberg table and when writing to an Iceberg table via Hive.
+
+| Hive             | Iceberg                 | Notes |
+|------------------|-------------------------|-------|
+| boolean          | boolean                 |       |
+| short            | integer                 | auto-conversion |
+| byte             | integer                 | auto-conversion |
+| integer          | integer                 |       |
+| long             | long                    |       |
+| float            | float                   |       |
+| double           | double                  |       |
+| date             | date                    |       |
+| timestamp        | timestamp without timezone |    |
+| timestamplocaltz | timestamp with timezone | Hive 3 only |
+| interval_year_month |                      | not supported |
+| interval_day_time |                        | not supported |
+| char             | string                  | auto-conversion |
+| varchar          | string                  | auto-conversion |
+| string           | string                  |       |
+| binary           | binary                  |       |
+| decimal          | decimal                 |       |
+| struct           | struct                  |       |
+| list             | list                    |       |
+| map              | map                     |       |
+| union            |                         | not supported |
diff --git a/docs/content/docs/integrations/_index.md b/docs/content/docs/integrations/_index.md
new file mode 100644
index 0000000..fec24ff
--- /dev/null
+++ b/docs/content/docs/integrations/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-handshake-o
+bookCollapseSection: true
+weight: 500
+---
\ No newline at end of file
diff --git a/docs/content/docs/integrations/aws.md b/docs/content/docs/integrations/aws.md
new file mode 100644
index 0000000..0eec0f5
--- /dev/null
+++ b/docs/content/docs/integrations/aws.md
@@ -0,0 +1,494 @@
+---
+title: "AWS"
+url: aws
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+ 
+# Iceberg AWS Integrations
+
+Iceberg provides integration with different AWS services through the `iceberg-aws` module. 
+This section describes how to use Iceberg with AWS.
+
+## Enabling AWS Integration
+
+The `iceberg-aws` module is bundled with Spark and Flink engine runtimes for all versions from `0.11.0` onwards.
+However, the AWS clients are not bundled so that you can use the same client version as your application.
+You will need to provide the AWS v2 SDK because that is what Iceberg depends on.
+You can choose to use the [AWS SDK bundle](https://mvnrepository.com/artifact/software.amazon.awssdk/bundle), 
+or individual AWS client packages (Glue, S3, DynamoDB, KMS, STS) if you would like to have a minimal dependency footprint.
+
+All the default AWS clients use the [URL Connection HTTP Client](https://mvnrepository.com/artifact/software.amazon.awssdk/url-connection-client)
+for HTTP connection management.
+This dependency is not part of the AWS SDK bundle and needs to be added separately.
+To choose a different HTTP client library such as [Apache HTTP Client](https://mvnrepository.com/artifact/software.amazon.awssdk/apache-client),
+see the section [client customization](#aws-client-customization) for more details.
+
+All the AWS module features can be loaded through custom catalog properties;
+see the documentation of each engine for how to load a custom catalog.
+Here are some examples.
+
+### Spark
+
+For example, to use AWS features with Spark 3 and AWS clients version 2.15.40, you can start the Spark SQL shell with:
+
+```sh
+# add Iceberg dependency
+ICEBERG_VERSION={{% icebergVersion %}}
+DEPENDENCIES="org.apache.iceberg:iceberg-spark3-runtime:$ICEBERG_VERSION"
+
+# add AWS dependency
+AWS_SDK_VERSION=2.15.40
+AWS_MAVEN_GROUP=software.amazon.awssdk
+AWS_PACKAGES=(
+    "bundle"
+    "url-connection-client"
+)
+for pkg in "${AWS_PACKAGES[@]}"; do
+    DEPENDENCIES+=",$AWS_MAVEN_GROUP:$pkg:$AWS_SDK_VERSION"
+done
+
+# start Spark SQL client shell
+spark-sql --packages $DEPENDENCIES \
+    --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO \
+    --conf spark.sql.catalog.my_catalog.lock-impl=org.apache.iceberg.aws.glue.DynamoLockManager \
+    --conf spark.sql.catalog.my_catalog.lock.table=myGlueLockTable
+```
+
+In the shell command above, we use `--packages` to specify the additional AWS bundle and HTTP client dependencies, with their version set to `2.15.40`.
+
+### Flink
+
+To use AWS module with Flink, you can download the necessary dependencies and specify them when starting the Flink SQL client:
+
+```sh
+# download Iceberg dependency
+ICEBERG_VERSION={{% icebergVersion %}}
+MAVEN_URL=https://repo1.maven.org/maven2
+ICEBERG_MAVEN_URL=$MAVEN_URL/org/apache/iceberg
+wget $ICEBERG_MAVEN_URL/iceberg-flink-runtime/$ICEBERG_VERSION/iceberg-flink-runtime-$ICEBERG_VERSION.jar
+
+# download AWS dependency
+AWS_SDK_VERSION=2.15.40
+AWS_MAVEN_URL=$MAVEN_URL/software/amazon/awssdk
+AWS_PACKAGES=(
+    "bundle"
+    "url-connection-client"
+)
+for pkg in "${AWS_PACKAGES[@]}"; do
+    wget $AWS_MAVEN_URL/$pkg/$AWS_SDK_VERSION/$pkg-$AWS_SDK_VERSION.jar
+done
+
+# start Flink SQL client shell
+/path/to/bin/sql-client.sh embedded \
+    -j iceberg-flink-runtime-$ICEBERG_VERSION.jar \
+    -j bundle-$AWS_SDK_VERSION.jar \
+    -j url-connection-client-$AWS_SDK_VERSION.jar \
+    shell
+```
+
+With those dependencies, you can create a Flink catalog like the following:
+
+```sql
+CREATE CATALOG my_catalog WITH (
+  'type'='iceberg',
+  'warehouse'='s3://my-bucket/my/key/prefix',
+  'catalog-impl'='org.apache.iceberg.aws.glue.GlueCatalog',
+  'io-impl'='org.apache.iceberg.aws.s3.S3FileIO',
+  'lock-impl'='org.apache.iceberg.aws.glue.DynamoLockManager',
+  'lock.table'='myGlueLockTable'
+);
+```
+
+You can also specify the catalog configurations in `sql-client-defaults.yaml` to preload it:
+
+```yaml
+catalogs: 
+  - name: my_catalog
+    type: iceberg
+    warehouse: s3://my-bucket/my/key/prefix
+    catalog-impl: org.apache.iceberg.aws.glue.GlueCatalog
+    io-impl: org.apache.iceberg.aws.s3.S3FileIO
+    lock-impl: org.apache.iceberg.aws.glue.DynamoLockManager
+    lock.table: myGlueLockTable
+```
+
+### Hive
+
+To use the AWS module with Hive, you can download the necessary dependencies similar to the Flink example,
+and then add them to the Hive classpath or add the jars at runtime in the CLI:
+
+```
+add jar /my/path/to/iceberg-hive-runtime.jar;
+add jar /my/path/to/aws/bundle.jar;
+add jar /my/path/to/aws/url-connection-client.jar;
+```
+
+With those dependencies, you can register a Glue catalog and create external tables in Hive at runtime in the CLI:
+
+```sql
+SET iceberg.engine.hive.enabled=true;
+SET hive.vectorized.execution.enabled=false;
+SET iceberg.catalog.glue.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog;
+SET iceberg.catalog.glue.warehouse=s3://my-bucket/my/key/prefix;
+SET iceberg.catalog.glue.lock-impl=org.apache.iceberg.aws.glue.DynamoLockManager;
+SET iceberg.catalog.glue.lock.table=myGlueLockTable;
+
+-- suppose you have an Iceberg table database_a.table_a created by GlueCatalog
+CREATE EXTERNAL TABLE database_a.table_a
+STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
+TBLPROPERTIES ('iceberg.catalog'='glue');
+```
+
+You can also preload the catalog by setting the configurations above in `hive-site.xml`.
+
+## Catalogs
+
+There are multiple different options that users can choose to build an Iceberg catalog with AWS.
+
+### Glue Catalog
+
+Iceberg enables the use of [AWS Glue](https://aws.amazon.com/glue) as the `Catalog` implementation.
+When used, an Iceberg namespace is stored as a [Glue Database](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-databases.html), 
+an Iceberg table is stored as a [Glue Table](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html),
+and every Iceberg table version is stored as a [Glue TableVersion](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-tables.html#aws-glue-api-catalog-tables-TableVersion). 
+You can start using Glue catalog by specifying the `catalog-impl` as `org.apache.iceberg.aws.glue.GlueCatalog`,
+just like what is shown in the [enabling AWS integration](#enabling-aws-integration) section above. 
+More details about loading the catalog can be found in individual engine pages, such as [Spark](../spark-configuration/#loading-a-custom-catalog) and [Flink](../flink/#creating-catalogs-and-using-catalogs).
+
+#### Glue Catalog ID
+There is a unique Glue metastore in each AWS account and each AWS region.
+By default, `GlueCatalog` chooses the Glue metastore to use based on the user's default AWS client credential and region setup.
+You can specify the Glue catalog ID through the `glue.id` catalog property to point to a Glue catalog in a different AWS account.
+The Glue catalog ID is your numeric AWS account ID.
+If the Glue catalog is in a different region, you should configure your AWS client to point to the correct region;
+see more details in [AWS client customization](#aws-client-customization).
+
+#### Skip Archive
+
+By default, Glue stores all the table versions created, and users can roll back a table to any historical version if needed.
+However, if you are streaming data to Iceberg, this can easily create a large number of Glue table versions.
+Therefore, it is recommended to turn off the archive feature in Glue by setting `glue.skip-archive` to `true`, as shown in the sketch below.
+For more details, please read [Glue Quotas](https://docs.aws.amazon.com/general/latest/gr/glue.html) and the [UpdateTable API](https://docs.aws.amazon.com/glue/latest/webapi/API_UpdateTable.html).
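+
+For example, a minimal Flink SQL sketch that points a Glue catalog at a specific account and disables version archiving; the catalog name, account ID, and bucket are only placeholders:
+
+```sql
+CREATE CATALOG glue_catalog WITH (
+  'type'='iceberg',
+  'catalog-impl'='org.apache.iceberg.aws.glue.GlueCatalog',
+  'io-impl'='org.apache.iceberg.aws.s3.S3FileIO',
+  'warehouse'='s3://my-bucket/my/key/prefix',
+  'glue.id'='123456789012',
+  'glue.skip-archive'='true'
+);
+```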
+
+#### DynamoDB for Commit Locking
+
+Glue does not have a strong guarantee over concurrent updates to a table. 
+Although it throws `ConcurrentModificationException` when detecting two processes updating a table at the same time,
+there is no guarantee that one update would not clobber the other update.
+Therefore, [DynamoDB](https://aws.amazon.com/dynamodb) can be used for Glue, so that for every commit, 
+`GlueCatalog` first obtains a lock using a helper DynamoDB table and then tries to safely modify the Glue table.
+
+This feature requires the following lock related catalog properties:
+
+1. Set `lock-impl` as `org.apache.iceberg.aws.glue.DynamoLockManager`.
+2. Set `lock.table` as the DynamoDB table name you would like to use. If the lock table with the given name does not exist in DynamoDB, a new table is created with billing mode set as [pay-per-request](https://aws.amazon.com/blogs/aws/amazon-dynamodb-on-demand-no-capacity-planning-and-pay-per-request-pricing).
+
+Other lock related catalog properties can also be used to adjust locking behaviors such as heartbeat interval.
+For more details, please refer to [Lock catalog properties](../configuration/#lock-catalog-properties). 
+
+#### Warehouse Location
+
+Similar to all other catalog implementations, `warehouse` is a required catalog property to determine the root path of the data warehouse in storage.
+By default, Glue only allows a warehouse location in S3 because of the use of `S3FileIO`.
+To store data in a different local or cloud store, Glue catalog can switch to use `HadoopFileIO` or any custom FileIO by setting the `io-impl` catalog property.
+Details about this feature can be found in the [custom FileIO](../custom-catalog/#custom-file-io-implementation) section.
+
+#### Table Location
+
+By default, the root location for a table `my_table` of namespace `my_ns` is at `my-warehouse-location/my-ns.db/my-table`.
+This default root location can be changed at both namespace and table level.
+
+To use a different path prefix for all tables under a namespace, use AWS console or any AWS Glue client SDK you like to update the `locationUri` attribute of the corresponding Glue database.
+For example, you can update the `locationUri` of `my_ns` to `s3://my-ns-bucket`, 
+then any newly created table will have a default root location under the new prefix.
+For instance, a new table `my_table_2` will have its root location at `s3://my-ns-bucket/my_table_2`.
+
+To use a completely different root path for a specific table, set the `location` table property to the desired root path value you want.
+For example, in Spark SQL you can do:
+
+```sql
+CREATE TABLE my_catalog.my_ns.my_table (
+    id bigint,
+    data string,
+    category string)
+USING iceberg
+OPTIONS ('location'='s3://my-special-table-bucket')
+PARTITIONED BY (category);
+```
+
+For engines like Spark that supports the `LOCATION` keyword, the above SQL statement is equivalent to:
+
+```sql
+CREATE TABLE my_catalog.my_ns.my_table (
+    id bigint,
+    data string,
+    category string)
+USING iceberg
+LOCATION 's3://my-special-table-bucket'
+PARTITIONED BY (category);
+```
+
+### DynamoDB Catalog
+
+Iceberg supports using a [DynamoDB](https://aws.amazon.com/dynamodb) table to record and manage database and table information.
+
+#### Configurations
+
+The DynamoDB catalog supports the following configurations:
+
+| Property                          | Default                                            | Description                                            |
+| --------------------------------- | -------------------------------------------------- | ------------------------------------------------------ |
+| dynamodb.table-name               | iceberg                                            | name of the DynamoDB table used by DynamoDbCatalog     |
+
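+For example, a minimal Flink SQL sketch that loads the DynamoDB catalog; the catalog name is a placeholder and the fully-qualified catalog class name shown here is an assumption for illustration:
+
+```sql
+CREATE CATALOG dynamo_catalog WITH (
+  'type'='iceberg',
+  'catalog-impl'='org.apache.iceberg.aws.dynamodb.DynamoDbCatalog',
+  'io-impl'='org.apache.iceberg.aws.s3.S3FileIO',
+  'warehouse'='s3://my-bucket/my/key/prefix',
+  'dynamodb.table-name'='iceberg'
+);
+```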
+
+#### Internal Table Design
+
+The DynamoDB table is designed with the following columns:
+
+| Column            | Key             | Type        | Description                                                          |
+| ----------------- | --------------- | ----------- |--------------------------------------------------------------------- |
+| identifier        | partition key   | string      | table identifier such as `db1.table1`, or string `NAMESPACE` for namespaces |
+| namespace         | sort key        | string      | namespace name. A [global secondary index (GSI)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html) is created with namespace as partition key, identifier as sort key, no other projected columns |
+| v                 |                 | string      | row version, used for optimistic locking |
+| updated_at        |                 | number      | timestamp (millis) of the last update | 
+| created_at        |                 | number      | timestamp (millis) of the table creation |
+| p.<property_key\> |                 | string      | Iceberg-defined table properties including `table_type`, `metadata_location` and `previous_metadata_location` or namespace properties
+
+This design has the following benefits:
+
+1. it avoids the potential [hot partition issue](https://aws.amazon.com/premiumsupport/knowledge-center/dynamodb-table-throttled/) if there is heavy write traffic to the tables within the same namespace, because the partition key is at the table level
+2. namespace operations are clustered in a single partition to avoid affecting table commit operations
+3. a sort key to partition key reverse GSI is used for list table operation, and all other operations are single row ops or single partition query. No full table scan is needed for any operation in the catalog.
+4. a string UUID version field `v` is used instead of `updated_at` to avoid 2 processes committing at the same millisecond
+5. multi-row transaction is used for `catalog.renameTable` to ensure idempotency
+6. properties are flattened as top level columns so that user can add custom GSI on any property field to customize the catalog. For example, users can store owner information as table property `owner`, and search tables by owner by adding a GSI on the `p.owner` column.
+
+### RDS JDBC Catalog
+
+Iceberg also supports the JDBC catalog, which uses a table in a relational database to manage Iceberg tables.
+You can configure the JDBC catalog to work with relational database services like [AWS RDS](https://aws.amazon.com/rds).
+Read [the JDBC integration page](../jdbc/#jdbc-catalog) for guides and examples about using the JDBC catalog.
+Read [this AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.Connecting.Java.html) for more details about configuring JDBC catalog with IAM authentication. 
+
+### Which catalog to choose?
+
+With all the available options, we offer the following guidance when choosing the right catalog to use for your application:
+
+1. if your organization has an existing Glue metastore or plans to use the AWS analytics ecosystem including Glue, [Athena](https://aws.amazon.com/athena), [EMR](https://aws.amazon.com/emr), [Redshift](https://aws.amazon.com/redshift) and [LakeFormation](https://aws.amazon.com/lake-formation), Glue catalog provides the easiest integration.
+2. if your application requires frequent updates to tables or high read and write throughput (e.g. streaming writes), DynamoDB catalog provides the best performance through optimistic locking.
+3. if you would like to enforce access control for tables in a catalog, Glue tables can be managed as an [IAM resource](https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsglue.html), whereas DynamoDB catalog tables can only be managed through [item-level permission](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/specifying-conditions.html) which is much more complicated.
+4. if you would like to query tables based on table property information without the need to scan the entire catalog, DynamoDB catalog allows you to build secondary indexes for any arbitrary property field and provides efficient query performance.
+5. if you would like to have the benefit of DynamoDB catalog while also connecting to Glue, you can enable [DynamoDB stream with Lambda trigger](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.Lambda.Tutorial.html) to asynchronously update your Glue metastore with table information in the DynamoDB catalog.
+6. if your organization already maintains an existing relational database in RDS or uses [serverless Aurora](https://aws.amazon.com/rds/aurora/serverless/) to manage tables, JDBC catalog provides the easiest integration.
+
+## S3 FileIO
+
+Iceberg allows users to write data to S3 through `S3FileIO`.
+`GlueCatalog` by default uses this `FileIO`, and other catalogs can load this `FileIO` using the `io-impl` catalog property.
+
+### Progressive Multipart Upload
+
+`S3FileIO` implements a customized progressive multipart upload algorithm to upload data.
+Data files are uploaded by parts in parallel as soon as each part is ready,
+and each file part is deleted as soon as its upload process completes.
+This provides maximized upload speed and minimized local disk usage during uploads.
+Here are the configurations that users can tune related to this feature:
+
+| Property                          | Default                                            | Description                                            |
+| --------------------------------- | -------------------------------------------------- | ------------------------------------------------------ |
+| s3.multipart.num-threads          | the available number of processors in the system   | number of threads to use for uploading parts to S3 (shared across all output streams)  |
+| s3.multipart.part-size-bytes      | 32MB                                               | the size of a single part for multipart upload requests  |
+| s3.multipart.threshold            | 1.5                                                | the threshold expressed as a factor times the multipart size at which to switch from uploading using a single put object request to uploading using multipart upload  |
+| s3.staging-dir                    | `java.io.tmpdir` property value                    | the directory to hold temporary files  |
+
+### S3 Server Side Encryption
+
+`S3FileIO` supports all 3 S3 server side encryption modes:
+
+* [SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html): When you use Server-Side Encryption with Amazon S3-Managed Keys (SSE-S3), each object is encrypted with a unique key. As an additional safeguard, it encrypts the key itself with a master key that it regularly rotates. Amazon S3 server-side encryption uses one of the strongest block ciphers available, 256-bit Advanced Encryption Standard (AES-256), to encrypt your data.
+* [SSE-KMS](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html): Server-Side Encryption with Customer Master Keys (CMKs) Stored in AWS Key Management Service (SSE-KMS) is similar to SSE-S3, but with some additional benefits and charges for using this service. There are separate permissions for the use of a CMK that provides added protection against unauthorized access of your objects in Amazon S3. SSE-KMS also provides you with an audit trail that shows when your CMK [...]
+* [SSE-C](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html): With Server-Side Encryption with Customer-Provided Keys (SSE-C), you manage the encryption keys and Amazon S3 manages the encryption, as it writes to disks, and decryption, when you access your objects.
+
+To enable server side encryption, use the following configuration properties:
+
+| Property                          | Default                                  | Description                                            |
+| --------------------------------- | ---------------------------------------- | ------------------------------------------------------ |
+| s3.sse.type                       | `none`                                   | `none`, `s3`, `kms` or `custom`                        |
+| s3.sse.key                        | `aws/s3` for `kms` type, null otherwise  | A KMS Key ID or ARN for `kms` type, or a custom base-64 AES256 symmetric key for `custom` type.  |
+| s3.sse.md5                        | null                                     | If SSE type is `custom`, this value must be set as the base-64 MD5 digest of the symmetric key to ensure integrity. |
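+
+For example, a minimal sketch that enables SSE-KMS through catalog properties in Flink SQL; the catalog name, bucket, and key ARN are only placeholders:
+
+```sql
+CREATE CATALOG my_kms_catalog WITH (
+  'type'='iceberg',
+  'catalog-impl'='org.apache.iceberg.aws.glue.GlueCatalog',
+  'io-impl'='org.apache.iceberg.aws.s3.S3FileIO',
+  'warehouse'='s3://my-bucket/my/key/prefix',
+  's3.sse.type'='kms',
+  's3.sse.key'='arn:aws:kms:us-east-1:123456789012:key/my-key-id'
+);
+```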
+
+### S3 Access Control List
+
+`S3FileIO` supports S3 access control lists (ACL) for detailed access control.
+Users can choose the ACL level by setting the `s3.acl` property.
+For more details, please read [S3 ACL Documentation](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
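+
+For example, to grant the bucket owner full control of objects written by `S3FileIO` (a sketch; the catalog name is a placeholder and `bucket-owner-full-control` is one of the standard S3 canned ACLs):
+
+```shell
+spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.s3.acl=bucket-owner-full-control
+```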
+
+### Object Store File Layout
+
+S3 and many other cloud storage services [throttle requests based on object prefix](https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/).
+Data stored in S3 with a traditional Hive storage layout can face S3 request throttling, as objects are stored under the same file path prefix.
+
+Iceberg by default uses the Hive storage layout but can be switched to use the `ObjectStoreLocationProvider`.
+With `ObjectStoreLocationProvider`, a deterministic hash is generated for each stored file, with the hash appended
+directly after the `write.data.path`. This ensures files written to S3 are equally distributed across multiple [prefixes](https://aws.amazon.com/premiumsupport/knowledge-center/s3-object-key-naming-pattern/) in the S3 bucket, resulting in minimized throttling and maximized throughput for S3-related IO operations. When using `ObjectStoreLocationProvider`, having a shared and short `write.data.path` across your Iceberg tables will improve performance.
+
+For more information on how S3 scales API QPS, check out the 2018 re:Invent session on [Best Practices for Amazon S3 and Amazon S3 Glacier](https://youtu.be/rHeTn9pHNKo?t=3219). At [53:39](https://youtu.be/rHeTn9pHNKo?t=3219) it covers how S3 scales/partitions, and at [54:50](https://youtu.be/rHeTn9pHNKo?t=3290) it discusses the 30-60 minute wait time before new partitions are created.
+
+To use the `ObjectStoreLocationProvider`, add `'write.object-storage.enabled'=true` to the table's properties.
+Below is an example Spark SQL command to create a table using the `ObjectStoreLocationProvider`:
+```sql
+CREATE TABLE my_catalog.my_ns.my_table (
+    id bigint,
+    data string,
+    category string)
+USING iceberg
+OPTIONS (
+    'write.object-storage.enabled'=true, 
+    'write.data.path'='s3://my-table-data-bucket')
+PARTITIONED BY (category);
+```
+
+We can then insert a single row into this new table:
+```SQL
+INSERT INTO my_catalog.my_ns.my_table VALUES (1, "Pizza", "orders");
+```
+
+This writes the data to S3 with a hash (`2d3905f8`) appended directly after the `write.data.path`, ensuring reads of the table are spread evenly across [S3 bucket prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance.html) and improving performance.
+```
+s3://my-table-data-bucket/2d3905f8/my_ns.db/my_table/category=orders/00000-0-5affc076-96a4-48f2-9cd2-d5efbc9f0c94-00001.parquet
+```
+
+Note that the path resolution logic for `ObjectStoreLocationProvider` is `write.data.path`, then `<tableLocation>/data`.
+However, for older versions up to 0.12.0, the logic is as follows:
+- before 0.12.0, `write.object-storage.path` must be set.
+- at 0.12.0, `write.object-storage.path`, then `write.folder-storage.path`, then `<tableLocation>/data`.
+
+For more details, please refer to the [LocationProvider Configuration](../custom-catalog/#custom-location-provider-implementation) section.  
+
+### S3 Strong Consistency
+
+In November 2020, S3 announced [strong consistency](https://aws.amazon.com/s3/consistency/) for all read operations, and Iceberg has been updated to fully leverage this feature.
+There is no redundant consistency wait or check that might negatively impact performance during IO operations.
+
+### Hadoop S3A FileSystem
+
+Before `S3FileIO` was introduced, many Iceberg users chose to use `HadoopFileIO` to write data to S3 through the [S3A FileSystem](https://github.com/apache/hadoop/blob/trunk/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java).
+As introduced in the previous sections, `S3FileIO` adopts the latest AWS clients and S3 features for optimized security and performance,
+and is thus recommended for S3 use cases over the S3A FileSystem.
+
+`S3FileIO` writes data with the `s3://` URI scheme, but it is also compatible with schemes written by the S3A FileSystem.
+This means that `S3FileIO` can still read any table manifests containing `s3a://` or `s3n://` file paths.
+This feature allows people to easily switch from S3A to `S3FileIO`.
+
+If for any reason you have to use S3A, here are the instructions, followed by an example Spark command:
+
+1. To store data using S3A, specify the `warehouse` catalog property to be an S3A path, e.g. `s3a://my-bucket/my-warehouse` 
+2. For `HiveCatalog`, to also store metadata using S3A, specify the Hadoop config property `hive.metastore.warehouse.dir` to be an S3A path.
+3. Add [hadoop-aws](https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws) as a runtime dependency of your compute engine.
+4. Configure AWS settings based on [hadoop-aws documentation](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html) (make sure to check the version; S3A configuration varies significantly between versions).
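+
+As an illustration only (the catalog name, bucket, and `hadoop-aws` version are placeholder assumptions), a Spark session backed by S3A might be started like this:
+
+```shell
+spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}},org.apache.hadoop:hadoop-aws:3.2.1 \
+    --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.type=hive \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3a://my-bucket/my-warehouse
+```
+
+The `hadoop-aws` version should match the Hadoop version bundled with your Spark distribution.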
+
+## AWS Client Customization
+
+Many organizations have customized their way of configuring AWS clients with their own credential provider, access proxy, retry strategy, etc.
+Iceberg allows users to plug in their own implementation of `org.apache.iceberg.aws.AwsClientFactory` by setting the `client.factory` catalog property.
+
+### Cross-Account and Cross-Region Access
+
+It is a common use case for organizations to have a centralized AWS account for Glue metastore and S3 buckets, and use different AWS accounts and regions for different teams to access those resources.
+In this case, a [cross-account IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) is needed to access those centralized resources.
+Iceberg provides an AWS client factory `AssumeRoleAwsClientFactory` to support this common use case.
+This also serves as an example for users who would like to implement their own AWS client factory.
+
+This client factory has the following configurable catalog properties:
+
+| Property                          | Default                                  | Description                                            |
+| --------------------------------- | ---------------------------------------- | ------------------------------------------------------ |
+| client.assume-role.arn            | null, requires user input                | ARN of the role to assume, e.g. arn:aws:iam::123456789:role/myRoleToAssume  |
+| client.assume-role.region         | null, requires user input                | All AWS clients except the STS client will use the given region instead of the default region chain  |
+| client.assume-role.external-id    | null                                     | An optional [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)  |
+| client.assume-role.timeout-sec    | 1 hour                                   | Timeout of each assume role session. At the end of the timeout, a new set of role session credentials will be fetched through an STS client.  |
+
+By using this client factory, an STS client is initialized with the default credential and region to assume the specified role.
+The Glue, S3 and DynamoDB clients are then initialized with the assume-role credential and region to access resources.
+Here is an example to start Spark shell with this client factory:
+
+```shell
+spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}},software.amazon.awssdk:bundle:2.15.40 \
+    --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
+    --conf spark.sql.catalog.my_catalog.client.factory=org.apache.iceberg.aws.AssumeRoleAwsClientFactory \
+    --conf spark.sql.catalog.my_catalog.client.assume-role.arn=arn:aws:iam::123456789:role/myRoleToAssume \
+    --conf spark.sql.catalog.my_catalog.client.assume-role.region=ap-northeast-1
+```
+
+## Run Iceberg on AWS
+
+### Amazon EMR
+
+[Amazon EMR](https://aws.amazon.com/emr/) can provision clusters with [Spark](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark.html) (EMR 6 for Spark 3, EMR 5 for Spark 2),
+[Hive](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-hive.html), [Flink](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-flink.html),
+and [Trino](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-presto.html), all of which can run Iceberg.
+
+You can use a [bootstrap action](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-bootstrap.html) similar to the following to pre-install all necessary dependencies:
+
+```sh
+#!/bin/bash
+
+AWS_SDK_VERSION=2.15.40
+ICEBERG_VERSION={{% icebergVersion %}}
+MAVEN_URL=https://repo1.maven.org/maven2
+ICEBERG_MAVEN_URL=$MAVEN_URL/org/apache/iceberg
+AWS_MAVEN_URL=$MAVEN_URL/software/amazon/awssdk
+# NOTE: this is just an example shared class path between Spark and Flink,
+#  please choose a proper class path for production.
+LIB_PATH=/usr/share/aws/aws-java-sdk/
+
+AWS_PACKAGES=(
+  "bundle"
+  "url-connection-client"
+)
+
+ICEBERG_PACKAGES=(
+  "iceberg-spark3-runtime"
+  "iceberg-flink-runtime"
+)
+
+install_dependencies () {
+  install_path=$1
+  download_url=$2
+  version=$3
+  shift
+  pkgs=("$@")
+  for pkg in "${pkgs[@]}"; do
+    sudo wget -P $install_path $download_url/$pkg/$version/$pkg-$version.jar
+  done
+}
+
+install_dependencies $LIB_PATH $ICEBERG_MAVEN_URL $ICEBERG_VERSION "${ICEBERG_PACKAGES[@]}"
+install_dependencies $LIB_PATH $AWS_MAVEN_URL $AWS_SDK_VERSION "${AWS_PACKAGES[@]}"
+```
+
+
+### Amazon Kinesis
+
+[Amazon Kinesis Data Analytics](https://aws.amazon.com/about-aws/whats-new/2019/11/you-can-now-run-fully-managed-apache-flink-applications-with-apache-kafka/) provides a platform
+to run fully managed Apache Flink applications. You can include Iceberg in your application JAR and run it on the platform.
diff --git a/docs/content/docs/integrations/jdbc.md b/docs/content/docs/integrations/jdbc.md
new file mode 100644
index 0000000..c119eb4
--- /dev/null
+++ b/docs/content/docs/integrations/jdbc.md
@@ -0,0 +1,71 @@
+---
+title: "JDBC"
+url: jdbc
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Iceberg JDBC Integration
+
+## JDBC Catalog
+
+Iceberg supports using a table in a relational database to manage Iceberg tables through JDBC.
+The database that JDBC connects to must support atomic transactions to allow the JDBC catalog implementation to
+properly support atomic Iceberg table commits and read serializable isolation.
+
+### Configurations
+
+Because each database and database service provider might require different configurations,
+the JDBC catalog allows arbitrary configurations through the following properties:
+
+| Property             | Default                           | Description                                            |
+| -------------------- | --------------------------------- | ------------------------------------------------------ |
+| uri                  |                                   | the JDBC connection string |
+| jdbc.<property_key\> |                                   | any key-value pairs to configure the JDBC connection |
+
+### Examples
+
+
+#### Spark
+
+You can start a Spark session with a MySQL JDBC connection using the following configurations:
+
+```shell
+spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}} \
+    --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
+    --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.jdbc.JdbcCatalog \
+    --conf spark.sql.catalog.my_catalog.uri=jdbc:mysql://test.1234567890.us-west-2.rds.amazonaws.com:3306/default \
+    --conf spark.sql.catalog.my_catalog.jdbc.verifyServerCertificate=true \
+    --conf spark.sql.catalog.my_catalog.jdbc.useSSL=true \
+    --conf spark.sql.catalog.my_catalog.jdbc.user=admin \
+    --conf spark.sql.catalog.my_catalog.jdbc.password=pass
+```
+
+#### Java API
+
+```java
+Class.forName("com.mysql.cj.jdbc.Driver"); // ensure the JDBC driver is on the runtime classpath
+Map<String, String> properties = new HashMap<>();
+properties.put(CatalogProperties.CATALOG_IMPL, JdbcCatalog.class.getName());
+properties.put(CatalogProperties.URI, "jdbc:mysql://localhost:3306/test");
+properties.put(JdbcCatalog.PROPERTY_PREFIX + "user", "admin");
+properties.put(JdbcCatalog.PROPERTY_PREFIX + "password", "pass");
+properties.put(CatalogProperties.WAREHOUSE_LOCATION, "s3://warehouse/path");
+Configuration hadoopConf = new Configuration(); // configs if you use HadoopFileIO
+JdbcCatalog catalog = CatalogUtil.buildIcebergCatalog("test_jdbc_catalog", properties, hadoopConf);
+```
\ No newline at end of file
diff --git a/docs/content/docs/integrations/nessie.md b/docs/content/docs/integrations/nessie.md
new file mode 100644
index 0000000..0ff09f4
--- /dev/null
+++ b/docs/content/docs/integrations/nessie.md
@@ -0,0 +1,161 @@
+---
+title: "Nessie"
+url: nessie
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Iceberg Nessie Integration
+
+Iceberg provides integration with Nessie through the `iceberg-nessie` module.
+This section describes how to use Iceberg with Nessie. Nessie provides several key features on top of Iceberg:
+
+* multi-table transactions
+* git-like operations (e.g. branches, tags, commits)
+* Hive-like metastore capabilities
+
+See [Project Nessie](https://projectnessie.org) for more information on Nessie. Nessie requires a server to run, see
+[Getting Started](https://projectnessie.org/try/) to start a Nessie server.
+
+## Enabling Nessie Catalog
+
+The `iceberg-nessie` module is bundled with Spark and Flink runtimes for all versions from `0.11.0`. To get started
+with Nessie and Iceberg, simply add the Iceberg runtime to your process. For example: `spark-sql --packages
+org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}}`.
+
+## Spark SQL Extensions
+
+From Spark, Nessie SQL extensions can be used to manage the Nessie repo as shown below.
+
+```shell
+bin/spark-sql \
+  --packages "org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}},org.projectnessie:nessie-spark-extensions:{{% nessieVersion %}}" \
+  --conf spark.sql.extensions="org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions" \
+  --conf <other settings>
+```
+Please refer to the [Nessie SQL extension documentation](https://projectnessie.org/tools/sql/) to learn more about it.
+
+## Nessie Catalog
+
+One major feature introduced in release `0.11.0` is the ability to easily interact with a [Custom
+Catalog](../custom-catalog) from Spark and Flink. See [Spark Configuration](../spark-configuration#catalog-configuration)
+and [Flink Configuration](../flink#custom-catalog) for instructions on adding a custom catalog to Iceberg.
+
+To use the Nessie Catalog the following properties are required:
+
+* `warehouse`. Like most other catalogs, the warehouse property is a file path to where this catalog should store tables.
+* `uri`. This is the Nessie server base URI, e.g. `http://localhost:19120/api/v1`.
+* `ref` (optional). This is the Nessie branch or tag you want to work in.
+
+To run directly in Java this looks like:
+
+``` java
+Map<String, String> options = new HashMap<>();
+options.put("warehouse", "/path/to/warehouse");
+options.put("ref", "main");
+options.put("uri", "https://localhost:19120/api/v1");
+Catalog nessieCatalog = CatalogUtil.loadCatalog("org.apache.iceberg.nessie.NessieCatalog", "nessie", hadoopConfig, options);
+```
+
+and in Spark:
+
+``` java
+conf.set("spark.sql.catalog.nessie.warehouse", "/path/to/warehouse");
+conf.set("spark.sql.catalog.nessie.uri", "http://localhost:19120/api/v1")
+conf.set("spark.sql.catalog.nessie.ref", "main")
+conf.set("spark.sql.catalog.nessie.catalog-impl", "org.apache.iceberg.nessie.NessieCatalog")
+conf.set("spark.sql.catalog.nessie", "org.apache.iceberg.spark.SparkCatalog")
+conf.set("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions")
+```
+This is how it looks in Flink via the Python API (additional details can be found [here](../flink#preparation-when-using-flinks-python-api)):
+```python
+import os
+from pyflink.datastream import StreamExecutionEnvironment
+from pyflink.table import StreamTableEnvironment
+
+env = StreamExecutionEnvironment.get_execution_environment()
+iceberg_flink_runtime_jar = os.path.join(os.getcwd(), "iceberg-flink-runtime-{{% icebergVersion %}}.jar")
+env.add_jars("file://{}".format(iceberg_flink_runtime_jar))
+table_env = StreamTableEnvironment.create(env)
+
+table_env.execute_sql("CREATE CATALOG nessie_catalog WITH ("
+                      "'type'='iceberg', "
+                      "'catalog-impl'='org.apache.iceberg.nessie.NessieCatalog', "
+                      "'uri'='http://localhost:19120/api/v1', "
+                      "'ref'='main', "
+                      "'warehouse'='/path/to/warehouse')")
+```
+
+There is nothing special above about the `nessie` name. A Spark catalog can have any name; the important parts are the
+settings for `catalog-impl` and the required config to start Nessie correctly.
+Once you have a Nessie catalog you have access to your entire Nessie repo. You can then perform create/delete/merge
+operations on branches and perform commits on branches. Each Iceberg table in a Nessie Catalog is identified by an
+arbitrary-length namespace and table name (e.g. `data.base.name.table`). These namespaces are implicit and don't need to
+be created separately. Any transaction on a Nessie-enabled Iceberg table is a single commit in Nessie. Nessie commits
+can encompass an arbitrary number of actions on an arbitrary number of tables; however, in Iceberg this is limited
+to the set of single-table transactions currently available.
+
+Further operations such as merges, viewing the commit log, or diffs are performed by direct interaction with the
+`NessieClient` in Java or by using the Python client or CLI. See [Nessie CLI](https://projectnessie.org/tools/cli/) for
+more details on the CLI and the [Spark Guide](https://projectnessie.org/tools/iceberg/spark/) for a more complete description of
+Nessie functionality.
+
+## Nessie and Iceberg
+
+In most cases Nessie acts just like any other catalog for Iceberg: it provides a logical organization of a set of tables
+and atomicity for transactions. However, using Nessie opens up other interesting possibilities. When using Nessie with
+Iceberg, every Iceberg transaction becomes a Nessie commit. This history can be listed, merged or cherry-picked across branches.
+
+### Loosely coupled transactions
+
+By creating a branch and performing a set of operations on that branch you can approximate a multi-table transaction.
+A sequence of commits can be performed on the newly created branch and then merged back into the main branch atomically.
+This gives the appearance of a series of connected changes being exposed to the main branch simultaneously. While downstream
+consumers will see multiple transactions appear at once, this isn't a true multi-table transaction on the database. It is
+effectively a fast-forward merge of multiple commits (in git language), and each operation from the branch is its own distinct
+transaction and commit. This is different from a real multi-table transaction, where all changes would be in the same commit.
+This does allow multiple applications to take part in modifying a branch, and for this distributed set of transactions to be
+exposed to downstream users simultaneously.
+
+ 
+### Experimentation
+
+Changes to a table can be tested in a branch before merging back into main. This is particularly useful when performing
+large changes like schema evolution or partition evolution. A partition evolution could be performed in a branch and you
+would be able to test out the change (e.g. performance benchmarks) before merging it. This provides great flexibility in
+performing on-line table modifications and testing without interrupting downstream use cases. If the changes are
+incorrect or not performant, the branch can be dropped without being merged.
+
+### Further use cases
+
+Please see the [Nessie Documentation](https://projectnessie.org/features/) for further descriptions of 
+Nessie features.
+
+{{< hint danger >}}
+Regular table maintenance in Iceberg is complicated when using Nessie. Please consult
+[Management Services](https://projectnessie.org/features/management/) before performing any 
+[table maintenance](../maintenance).
+{{< /hint >}}
+
+## Example 
+
+Please have a look at the [Nessie Demos repo](https://github.com/projectnessie/nessie-demos)
+for different examples of Nessie and Iceberg in action together.
+
+## Future Improvements
+
+* Iceberg multi-table transactions: changes to multiple Iceberg tables in the same transaction, isolation levels, etc.
diff --git a/docs/content/docs/prestodb/_index.md b/docs/content/docs/prestodb/_index.md
new file mode 100644
index 0000000..88304d7
--- /dev/null
+++ b/docs/content/docs/prestodb/_index.md
@@ -0,0 +1,23 @@
+---
+title: "Presto"
+bookIconImage: ../img/prestodb-logo.png
+bookFlatSection: true
+weight: 420
+bookExternalUrlNewWindow: https://prestodb.io/docs/current/connector/iceberg.html
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/content/docs/project/_index.md b/docs/content/docs/project/_index.md
new file mode 100644
index 0000000..1dd2bc8
--- /dev/null
+++ b/docs/content/docs/project/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-wrench
+bookCollapseSection: true
+weight: 900
+---
\ No newline at end of file
diff --git a/docs/content/docs/project/benchmarks.md b/docs/content/docs/project/benchmarks.md
new file mode 100644
index 0000000..3b2289d
--- /dev/null
+++ b/docs/content/docs/project/benchmarks.md
@@ -0,0 +1,5 @@
+---
+title: "Benchmarks"
+bookUrlFromBaseURL: /../../benchmarks
+bookHidden: true
+---
\ No newline at end of file
diff --git a/docs/content/docs/project/how-to-release.md b/docs/content/docs/project/how-to-release.md
new file mode 100644
index 0000000..c8a4fb9
--- /dev/null
+++ b/docs/content/docs/project/how-to-release.md
@@ -0,0 +1,4 @@
+---
+title: "How to Release"
+bookUrlFromBaseURL: /../../how-to-release
+---
\ No newline at end of file
diff --git a/docs/content/docs/project/roadmap.md b/docs/content/docs/project/roadmap.md
new file mode 100644
index 0000000..b1b8424
--- /dev/null
+++ b/docs/content/docs/project/roadmap.md
@@ -0,0 +1,4 @@
+---
+title: "Roadmap"
+bookUrlFromBaseURL: /../../roadmap
+---
\ No newline at end of file
diff --git a/docs/content/docs/project/security.md b/docs/content/docs/project/security.md
new file mode 100644
index 0000000..b88418a
--- /dev/null
+++ b/docs/content/docs/project/security.md
@@ -0,0 +1,4 @@
+---
+title: "Security"
+bookUrlFromBaseURL: /../../security
+---
\ No newline at end of file
diff --git a/docs/content/docs/project/trademarks.md b/docs/content/docs/project/trademarks.md
new file mode 100644
index 0000000..58278b9
--- /dev/null
+++ b/docs/content/docs/project/trademarks.md
@@ -0,0 +1,4 @@
+---
+title: "Trademarks"
+bookUrlFromBaseURL: /../../trademarks
+---
\ No newline at end of file
diff --git a/docs/content/docs/releases/0.12.0/_index.md b/docs/content/docs/releases/0.12.0/_index.md
new file mode 100644
index 0000000..5b5b9d0
--- /dev/null
+++ b/docs/content/docs/releases/0.12.0/_index.md
@@ -0,0 +1,5 @@
+---
+title: "0.12.0"
+weight: 100
+bookUrlFromBaseURL: /0.12.0
+---
\ No newline at end of file
diff --git a/docs/content/docs/releases/0.12.1/_index.md b/docs/content/docs/releases/0.12.1/_index.md
new file mode 100644
index 0000000..f57fcc2
--- /dev/null
+++ b/docs/content/docs/releases/0.12.1/_index.md
@@ -0,0 +1,5 @@
+---
+title: "0.12.1"
+weight: 99
+bookUrlFromBaseURL: /0.12.1
+---
\ No newline at end of file
diff --git a/docs/content/docs/releases/_index.md b/docs/content/docs/releases/_index.md
new file mode 100644
index 0000000..f398380
--- /dev/null
+++ b/docs/content/docs/releases/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-code-fork
+bookCollapseSection: true
+weight: 900
+---
\ No newline at end of file
diff --git a/docs/content/docs/releases/latest/_index.md b/docs/content/docs/releases/latest/_index.md
new file mode 100644
index 0000000..94d46ba
--- /dev/null
+++ b/docs/content/docs/releases/latest/_index.md
@@ -0,0 +1,5 @@
+---
+title: "Latest"
+weight: 98
+bookUrlFromBaseURL: /latest
+---
\ No newline at end of file
diff --git a/docs/content/docs/releases/release-notes.md b/docs/content/docs/releases/release-notes.md
new file mode 100644
index 0000000..468f5eb
--- /dev/null
+++ b/docs/content/docs/releases/release-notes.md
@@ -0,0 +1,4 @@
+---
+title: "Release Notes"
+bookUrlFromBaseURL: /../../releases
+---
\ No newline at end of file
diff --git a/docs/content/docs/spark/_index.md b/docs/content/docs/spark/_index.md
new file mode 100644
index 0000000..3a014d4
--- /dev/null
+++ b/docs/content/docs/spark/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-star-o
+bookFlatSection: true
+weight: 200
+---
\ No newline at end of file
diff --git a/docs/content/docs/spark/spark-configuration.md b/docs/content/docs/spark/spark-configuration.md
new file mode 100644
index 0000000..5962ab0
--- /dev/null
+++ b/docs/content/docs/spark/spark-configuration.md
@@ -0,0 +1,186 @@
+---
+title: "Configuration"
+url: spark-configuration
+aliases:
+    - "spark/spark-configuration"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark Configuration
+
+## Catalogs
+
+Spark 3.0 adds an API to plug in table catalogs that are used to load, create, and manage Iceberg tables. Spark catalogs are configured by setting Spark properties under `spark.sql.catalog`.
+
+This creates an Iceberg catalog named `hive_prod` that loads tables from a Hive metastore:
+
+```plain
+spark.sql.catalog.hive_prod = org.apache.iceberg.spark.SparkCatalog
+spark.sql.catalog.hive_prod.type = hive
+spark.sql.catalog.hive_prod.uri = thrift://metastore-host:port
+# omit uri to use the same URI as Spark: hive.metastore.uris in hive-site.xml
+```
+
+Iceberg also supports a directory-based catalog in HDFS that can be configured using `type=hadoop`:
+
+```plain
+spark.sql.catalog.hadoop_prod = org.apache.iceberg.spark.SparkCatalog
+spark.sql.catalog.hadoop_prod.type = hadoop
+spark.sql.catalog.hadoop_prod.warehouse = hdfs://nn:8020/warehouse/path
+```
+
+{{< hint info >}}
+The Hive-based catalog only loads Iceberg tables. To load non-Iceberg tables in the same Hive metastore, use a [session catalog](#replacing-the-session-catalog).
+{{< /hint >}}
+
+### Catalog configuration
+
+A catalog is created and named by adding a property `spark.sql.catalog.(catalog-name)` with an implementation class for its value.
+
+Iceberg supplies two implementations:
+
+* `org.apache.iceberg.spark.SparkCatalog` supports a Hive Metastore or a Hadoop warehouse as a catalog
+* `org.apache.iceberg.spark.SparkSessionCatalog` adds support for Iceberg tables to Spark's built-in catalog, and delegates to the built-in catalog for non-Iceberg tables
+
+Both catalogs are configured using properties nested under the catalog name. Common configuration properties for Hive and Hadoop are:
+
+| Property                                           | Values                        | Description                                                          |
+| -------------------------------------------------- | ----------------------------- | -------------------------------------------------------------------- |
+| spark.sql.catalog._catalog-name_.type              | `hive` or `hadoop`            | The underlying Iceberg catalog implementation, `HiveCatalog`, `HadoopCatalog` or left unset if using a custom catalog |
+| spark.sql.catalog._catalog-name_.catalog-impl      |                               | A custom Iceberg catalog implementation class, used when `type` is not set |
+| spark.sql.catalog._catalog-name_.default-namespace | default                       | The default current namespace for the catalog |
+| spark.sql.catalog._catalog-name_.uri               | thrift://host:port            | Metastore connect URI; default from `hive-site.xml` |
+| spark.sql.catalog._catalog-name_.warehouse         | hdfs://nn:8020/warehouse/path | Base path for the warehouse directory |
+| spark.sql.catalog._catalog-name_.cache-enabled     | `true` or `false`             | Whether to enable catalog cache, default value is `true` |
+
+Additional properties can be found in common [catalog configuration](../configuration#catalog-properties).
+
+
+### Using catalogs
+
+Catalog names are used in SQL queries to identify a table. In the examples above, `hive_prod` and `hadoop_prod` can be used to prefix database and table names that will be loaded from those catalogs.
+
+```sql
+SELECT * FROM hive_prod.db.table -- load db.table from catalog hive_prod
+```
+
+Spark 3 keeps track of the current catalog and namespace, which can be omitted from table names.
+
+```sql
+USE hive_prod.db;
+SELECT * FROM table -- load db.table from catalog hive_prod
+```
+
+To see the current catalog and namespace, run `SHOW CURRENT NAMESPACE`.
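+
+For example, in the `spark-sql` shell:
+
+```sql
+SHOW CURRENT NAMESPACE
+```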
+
+### Replacing the session catalog
+
+To add Iceberg table support to Spark's built-in catalog, configure `spark_catalog` to use Iceberg's `SparkSessionCatalog`.
+
+```plain
+spark.sql.catalog.spark_catalog = org.apache.iceberg.spark.SparkSessionCatalog
+spark.sql.catalog.spark_catalog.type = hive
+```
+
+Spark's built-in catalog supports existing v1 and v2 tables tracked in a Hive Metastore. This configures Spark to use Iceberg's `SparkSessionCatalog` as a wrapper around that session catalog. When a table is not an Iceberg table, the built-in catalog will be used to load it instead.
+
+This configuration can use the same Hive Metastore for both Iceberg and non-Iceberg tables.
+
+### Using catalog specific Hadoop configuration values
+
+Similar to configuring Hadoop properties by using `spark.hadoop.*`, it's possible to set per-catalog Hadoop configuration values when using Spark by adding the property for the catalog with the prefix `spark.sql.catalog.(catalog-name).hadoop.*`. These properties will take precedence over values configured globally using `spark.hadoop.*` and will only affect Iceberg tables.
+
+```plain
+spark.sql.catalog.hadoop_prod.hadoop.fs.s3a.endpoint = http://aws-local:9000
+```
+
+### Loading a custom catalog
+
+Spark supports loading a custom Iceberg `Catalog` implementation by specifying the `catalog-impl` property. Here is an example:
+
+```plain
+spark.sql.catalog.custom_prod = org.apache.iceberg.spark.SparkCatalog
+spark.sql.catalog.custom_prod.catalog-impl = com.my.custom.CatalogImpl
+spark.sql.catalog.custom_prod.my-additional-catalog-config = my-value
+```
+
+### Catalogs in Spark 2.4
+
+When using Iceberg 0.11.0 and later, Spark 2.4 can load tables from multiple Iceberg catalogs or from table locations.
+
+Catalogs in 2.4 are configured just like catalogs in 3.0, but only Iceberg catalogs are supported.
+
+
+## SQL Extensions
+
+Iceberg 0.11.0 and later add an extension module to Spark that provides new SQL commands, like `CALL` for stored procedures or `ALTER TABLE ... WRITE ORDERED BY`.
+
+Using those SQL commands requires adding Iceberg extensions to your Spark environment using the following Spark property:
+
+
+| Spark extensions property | Iceberg extensions implementation                                   |
+|---------------------------|---------------------------------------------------------------------|
+| `spark.sql.extensions`    | `org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions` |
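+
+For example, the extensions can be enabled when launching a Spark 3 SQL shell; this sketch reuses the runtime package shown elsewhere in these docs:
+
+```shell
+spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}} \
+    --conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions
+```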
+
+SQL extensions are not available for Spark 2.4.
+
+
+## Runtime configuration
+
+### Read options
+
+Spark read options are passed when configuring the DataFrameReader, like this:
+
+```scala
+// time travel
+spark.read
+    .option("snapshot-id", 10963874102873L)
+    .table("catalog.db.table")
+```
+
+| Spark option    | Default               | Description                                                                               |
+| --------------- | --------------------- | ----------------------------------------------------------------------------------------- |
+| snapshot-id     | (latest)              | Snapshot ID of the table snapshot to read                                                 |
+| as-of-timestamp | (latest)              | A timestamp in milliseconds; the snapshot used will be the snapshot current at this time. |
+| split-size      | As per table property | Overrides this table's read.split.target-size and read.split.metadata-target-size         |
+| lookback        | As per table property | Overrides this table's read.split.planning-lookback                                       |
+| file-open-cost  | As per table property | Overrides this table's read.split.open-file-cost                                          |
+| vectorization-enabled  | As per table property | Overrides this table's read.parquet.vectorization.enabled                                          |
+| batch-size  | As per table property | Overrides this table's read.parquet.vectorization.batch-size                                          |
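+
+For example, to time travel to a point in time instead of a snapshot ID (the timestamp value is illustrative):
+
+```scala
+// time travel to a point in time, given in milliseconds since the epoch
+spark.read
+    .option("as-of-timestamp", "499162860000")
+    .table("catalog.db.table")
+```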
+
+### Write options
+
+Spark write options are passed when configuring the DataFrameWriter, like this:
+
+```scala
+// write with Avro instead of Parquet
+df.write
+    .option("write-format", "avro")
+    .option("snapshot-property.key", "value")
+    .insertInto("catalog.db.table")
+```
+
+| Spark option           | Default                    | Description                                                  |
+| ---------------------- | -------------------------- | ------------------------------------------------------------ |
+| write-format           | Table write.format.default | File format to use for this write operation; parquet, avro, or orc |
+| target-file-size-bytes | As per table property      | Overrides this table's write.target-file-size-bytes          |
+| check-nullability      | true                       | Sets the nullable check on fields                            |
+| snapshot-property._custom-key_    | null            | Adds an entry with custom-key and corresponding value in the snapshot summary  |
+| fanout-enabled       | false        | Overrides this table's write.spark.fanout.enabled  |
+| check-ordering       | true        | Checks if input schema and table schema are same  |
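+
+As another sketch, a write option can override the table's target file size for a single write; the 128 MB value here is illustrative:
+
+```scala
+// override write.target-file-size-bytes for this write only (128 MB)
+df.write
+    .option("target-file-size-bytes", (128 * 1024 * 1024).toString)
+    .insertInto("catalog.db.table")
+```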
+
diff --git a/docs/content/docs/spark/spark-ddl.md b/docs/content/docs/spark/spark-ddl.md
new file mode 100644
index 0000000..1422444
--- /dev/null
+++ b/docs/content/docs/spark/spark-ddl.md
@@ -0,0 +1,343 @@
+---
+title: "DDL"
+url: spark-ddl
+aliases:
+    - "spark/spark-ddl"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark DDL
+
+To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration).
+
+Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions. Spark 2.4 does not support SQL DDL.
+
+{{< hint info >}}
+Spark 2.4 can't create Iceberg tables with DDL; instead, use Spark 3.x or the [Iceberg API](../java-api-quickstart).
+{{< /hint >}}
+
+## `CREATE TABLE`
+
+Spark 3.0 can create tables in any Iceberg catalog with the clause `USING iceberg`:
+
+```sql
+CREATE TABLE prod.db.sample (
+    id bigint COMMENT 'unique id',
+    data string)
+USING iceberg
+```
+
+Iceberg will convert the column type in Spark to the corresponding Iceberg type. Please check the section on [type compatibility when creating tables](../spark-writes#spark-type-to-iceberg-type) for details.
+
+Table create commands, including CTAS and RTAS, support the full range of Spark create clauses, including:
+
+* `PARTITIONED BY (partition-expressions)` to configure partitioning
+* `LOCATION '(fully-qualified-uri)'` to set the table location
+* `COMMENT 'table documentation'` to set a table description
+* `TBLPROPERTIES ('key'='value', ...)` to set [table configuration](../configuration)
+
+Create commands may also set the default format with the `USING` clause. This is only supported for `SparkCatalog` because Spark handles the `USING` clause differently for the built-in catalog.
+
+### `PARTITIONED BY`
+
+To create a partitioned table, use `PARTITIONED BY`:
+
+```sql
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string)
+USING iceberg
+PARTITIONED BY (category)
+```
+
+The `PARTITIONED BY` clause supports transform expressions to create [hidden partitions](../partitioning).
+
+```sql
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string,
+    ts timestamp)
+USING iceberg
+PARTITIONED BY (bucket(16, id), days(ts), category)
+```
+
+Supported transformations are:
+
+* `years(ts)`: partition by year
+* `months(ts)`: partition by month
+* `days(ts)` or `date(ts)`: equivalent to dateint partitioning
+* `hours(ts)` or `date_hour(ts)`: equivalent to dateint and hour partitioning
+* `bucket(N, col)`: partition by hashed value mod N buckets
+* `truncate(L, col)`: partition by value truncated to L
+    * Strings are truncated to the given length
+    * Integers and longs truncate to bins: `truncate(10, i)` produces partitions 0, 10, 20, 30, ...
+
+## `CREATE TABLE ... AS SELECT`
+
+Iceberg supports CTAS as an atomic operation when using a [`SparkCatalog`](../spark-configuration#catalog-configuration). CTAS is supported, but is not atomic when using [`SparkSessionCatalog`](../spark-configuration#replacing-the-session-catalog).
+
+```sql
+CREATE TABLE prod.db.sample
+USING iceberg
+AS SELECT ...
+```
+
+## `REPLACE TABLE ... AS SELECT`
+
+Iceberg supports RTAS as an atomic operation when using a [`SparkCatalog`](../spark-configuration#catalog-configuration). RTAS is supported, but is not atomic when using [`SparkSessionCatalog`](../spark-configuration#replacing-the-session-catalog).
+
+Atomic table replacement creates a new snapshot with the results of the `SELECT` query, but keeps table history.
+
+```sql
+REPLACE TABLE prod.db.sample
+USING iceberg
+AS SELECT ...
+```
+```sql
+CREATE OR REPLACE TABLE prod.db.sample
+USING iceberg
+AS SELECT ...
+```
+
+The schema and partition spec will be replaced if changed. To avoid modifying the table's schema and partitioning, use `INSERT OVERWRITE` instead of `REPLACE TABLE`.
+The new table properties in the `REPLACE TABLE` command will be merged with any existing table properties. The existing table properties will be updated if changed; otherwise, they are preserved.
+
+## `DROP TABLE`
+
+To delete a table, run:
+
+```sql
+DROP TABLE prod.db.sample
+```
+
+
+## `ALTER TABLE`
+
+Iceberg has full `ALTER TABLE` support in Spark 3, including:
+
+* Renaming a table
+* Setting or removing table properties
+* Adding, deleting, and renaming columns
+* Adding, deleting, and renaming nested fields
+* Reordering top-level columns and nested struct fields
+* Widening the type of `int`, `float`, and `decimal` fields
+* Making required columns optional
+
+In addition, [SQL extensions](../spark-configuration#sql-extensions) can be used to add support for partition evolution and setting a table's write order.
+
+### `ALTER TABLE ... RENAME TO`
+
+```sql
+ALTER TABLE prod.db.sample RENAME TO prod.db.new_name
+```
+
+### `ALTER TABLE ... SET TBLPROPERTIES`
+
+```sql
+ALTER TABLE prod.db.sample SET TBLPROPERTIES (
+    'read.split.target-size'='268435456'
+)
+```
+
+Iceberg uses table properties to control table behavior. For a list of available properties, see [Table configuration](../configuration).
+
+`UNSET` is used to remove properties:
+
+```sql
+ALTER TABLE prod.db.sample UNSET TBLPROPERTIES ('read.split.target-size')
+```
+
+### `ALTER TABLE ... ADD COLUMN`
+
+To add a column to Iceberg, use the `ADD COLUMNS` clause with `ALTER TABLE`:
+
+```sql
+ALTER TABLE prod.db.sample
+ADD COLUMNS (
+    new_column string comment 'new_column docs'
+  )
+```
+
+Multiple columns can be added at the same time, separated by commas.
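+
+For example, with illustrative column names:
+
+```sql
+ALTER TABLE prod.db.sample
+ADD COLUMNS (
+    new_column1 string comment 'new_column1 docs',
+    new_column2 bigint
+  )
+```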
+
+Nested columns should be identified using the full column name:
+
+```sql
+-- create a struct column
+ALTER TABLE prod.db.sample
+ADD COLUMN point struct<x: double, y: double>;
+
+-- add a field to the struct
+ALTER TABLE prod.db.sample
+ADD COLUMN point.z double
+```
+
+In Spark 2.4.4 and later, you can add columns in any position by adding `FIRST` or `AFTER` clauses:
+
+```sql
+ALTER TABLE prod.db.sample
+ADD COLUMN new_column bigint AFTER other_column
+```
+
+```sql
+ALTER TABLE prod.db.sample
+ADD COLUMN nested.new_column bigint FIRST
+```
+
+### `ALTER TABLE ... RENAME COLUMN`
+
+Iceberg allows any field to be renamed. To rename a field, use `RENAME COLUMN`:
+
+```sql
+ALTER TABLE prod.db.sample RENAME COLUMN data TO payload
+ALTER TABLE prod.db.sample RENAME COLUMN location.lat TO latitude
+```
+
+Note that nested rename commands only rename the leaf field. The second command above renames `location.lat` to `location.latitude`.
+
+### `ALTER TABLE ... ALTER COLUMN`
+
+Alter column is used to widen types, make a field optional, set comments, and reorder fields.
+
+Iceberg allows updating column types if the update is safe. Safe updates are:
+
+* `int` to `bigint`
+* `float` to `double`
+* `decimal(P,S)` to `decimal(P2,S)` when P2 > P (scale cannot change)
+
+```sql
+ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double
+```
+
+To add or remove columns from a struct, use `ADD COLUMN` or `DROP COLUMN` with a nested column name.
+
+Column comments can also be updated using `ALTER COLUMN`:
+
+```sql
+ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double COMMENT 'unit is bytes per second'
+ALTER TABLE prod.db.sample ALTER COLUMN measurement COMMENT 'unit is kilobytes per second'
+```
+
+Iceberg allows reordering top-level columns or columns in a struct using `FIRST` and `AFTER` clauses:
+
+```sql
+ALTER TABLE prod.db.sample ALTER COLUMN col FIRST
+```
+```sql
+ALTER TABLE prod.db.sample ALTER COLUMN nested.col AFTER other_col
+```
+
+Nullability can be changed using `SET NOT NULL` and `DROP NOT NULL`:
+
+```sql
+ALTER TABLE prod.db.sample ALTER COLUMN id DROP NOT NULL
+```
+
+{{< hint info >}}
+`ALTER COLUMN` is not used to update `struct` types. Use `ADD COLUMN` and `DROP COLUMN` to add or remove struct fields.
+{{< /hint >}}
+
+
+### `ALTER TABLE ... DROP COLUMN`
+
+To drop columns, use `ALTER TABLE ... DROP COLUMN`:
+
+```sql
+ALTER TABLE prod.db.sample DROP COLUMN id
+ALTER TABLE prod.db.sample DROP COLUMN point.z
+```
+
+## `ALTER TABLE` SQL extensions
+
+These commands are available in Spark 3.x when using Iceberg [SQL extensions](../spark-configuration#sql-extensions).
+
+### `ALTER TABLE ... ADD PARTITION FIELD`
+
+Iceberg supports adding new partition fields to a spec using `ADD PARTITION FIELD`:
+
+```sql
+ALTER TABLE prod.db.sample ADD PARTITION FIELD catalog -- identity transform
+```
+
+[Partition transforms](#partitioned-by) are also supported:
+
+```sql
+ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id)
+ALTER TABLE prod.db.sample ADD PARTITION FIELD truncate(data, 4)
+ALTER TABLE prod.db.sample ADD PARTITION FIELD years(ts)
+-- use optional AS keyword to specify a custom name for the partition field 
+ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id) AS shard
+```
+
+Adding a partition field is a metadata operation and does not change any of the existing table data. New data will be written with the new partitioning, but existing data will remain in the old partition layout. Old data files will have null values for the new partition fields in metadata tables.
+
+Dynamic partition overwrite behavior will change when the table's partitioning changes because dynamic overwrite replaces partitions implicitly. To overwrite explicitly, use the new `DataFrameWriterV2` API.
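+
+For example, an explicit overwrite with `DataFrameWriterV2` might look like the following sketch; the `updates` source table and the filter are illustrative:
+
+```scala
+import org.apache.spark.sql.functions.col
+
+// explicitly overwrite rows matching a filter instead of relying on dynamic partition overwrite
+spark.table("updates")
+    .writeTo("prod.db.sample")
+    .overwrite(col("category") === "orders")
+```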
+
+{{< hint note >}}
+To migrate from daily to hourly partitioning with transforms, it is not necessary to drop the daily partition field. Keeping the field ensures existing metadata table queries continue to work.
+{{< /hint >}}
+
+{{< hint danger >}}
+**Dynamic partition overwrite behavior will change** when partitioning changes.
+For example, if you partition by days and move to partitioning by hours, overwrites will overwrite hourly partitions but not daily partitions anymore.
+{{< /hint >}}
+
+### `ALTER TABLE ... DROP PARTITION FIELD`
+
+Partition fields can be removed using `DROP PARTITION FIELD`:
+
+```sql
+ALTER TABLE prod.db.sample DROP PARTITION FIELD catalog
+ALTER TABLE prod.db.sample DROP PARTITION FIELD bucket(16, id)
+ALTER TABLE prod.db.sample DROP PARTITION FIELD truncate(data, 4)
+ALTER TABLE prod.db.sample DROP PARTITION FIELD years(ts)
+ALTER TABLE prod.db.sample DROP PARTITION FIELD shard
+```
+
+Note that although the partition is removed, the column will still exist in the table schema.
+
+Dropping a partition field is a metadata operation and does not change any of the existing table data. New data will be written with the new partitioning, but existing data will remain in the old partition layout.
+
+{{< hint danger >}}
+**Dynamic partition overwrite behavior will change** when partitioning changes.
+For example, if you partition by days and move to partitioning by hours, overwrites will overwrite hourly partitions but not daily partitions anymore.
+{{< /hint >}}
+
+{{< hint danger >}}
+Be careful when dropping a partition field because it will change the schema of metadata tables, like `files`, and may cause metadata queries to fail or produce different results.
+{{< /hint >}}
+
+### `ALTER TABLE ... WRITE ORDERED BY`
+
+Iceberg tables can be configured with a sort order that some engines use to automatically sort data written to the table. For example, `MERGE INTO` in Spark will use the table ordering.
+
+To set the write order for a table, use `WRITE ORDERED BY`:
+
+```sql
+ALTER TABLE prod.db.sample WRITE ORDERED BY category, id
+-- use optional ASC/DESC keyword to specify sort order of each field (default ASC)
+ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC, id DESC
+-- use optional NULLS FIRST/NULLS LAST keyword to specify null order of each field (default FIRST)
+ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC NULLS LAST, id DESC NULLS FIRST
+```
+
+{{< hint info >}}
+Table write order does not guarantee data order for queries. It only affects how data is written to the table.
+{{< /hint >}}
diff --git a/docs/content/docs/spark/spark-getting-started.md b/docs/content/docs/spark/spark-getting-started.md
new file mode 100644
index 0000000..b4b0af2
--- /dev/null
+++ b/docs/content/docs/spark/spark-getting-started.md
@@ -0,0 +1,144 @@
+---
+title: "Getting Started"
+weight: 200
+url: getting-started
+aliases:
+    - "spark/getting-started"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Getting Started
+
+The latest version of Iceberg is [{{% icebergVersion %}}](../releases).
+
+Spark is currently the most feature-rich compute engine for Iceberg operations.
+We recommend getting started with Spark to understand Iceberg concepts and features through examples.
+You can also view documentation for using Iceberg with other compute engines under the **Engines** tab.
+
+## Using Iceberg in Spark 3
+
+To use Iceberg in a Spark shell, use the `--packages` option:
+
+```sh
+spark-shell --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}}
+```
+
+{{< hint info >}}
+If you want to include Iceberg in your Spark installation, add the [`iceberg-spark3-runtime` Jar][spark-runtime-jar] to Spark's `jars` folder.
+{{< /hint >}}
+
+[spark-runtime-jar]: https://search.maven.org/remotecontent?filepath=org/apache/iceberg/iceberg-spark3-runtime/{{% icebergVersion %}}/iceberg-spark3-runtime-{{% icebergVersion %}}.jar
+
+### Adding catalogs
+
+Iceberg comes with [catalogs](../spark-configuration#catalogs) that enable SQL commands to manage tables and load them by name. Catalogs are configured using properties under `spark.sql.catalog.(catalog_name)`.
+
+This command creates a path-based catalog named `local` for tables under `$PWD/warehouse` and adds support for Iceberg tables to Spark's built-in catalog:
+
+```sh
+spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}} \
+    --conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions \
+    --conf spark.sql.catalog.spark_catalog=org.apache.iceberg.spark.SparkSessionCatalog \
+    --conf spark.sql.catalog.spark_catalog.type=hive \
+    --conf spark.sql.catalog.local=org.apache.iceberg.spark.SparkCatalog \
+    --conf spark.sql.catalog.local.type=hadoop \
+    --conf spark.sql.catalog.local.warehouse=$PWD/warehouse
+```
+
+### Creating a table
+
+To create your first Iceberg table in Spark, use the `spark-sql` shell or `spark.sql(...)` to run a [`CREATE TABLE`](../spark-ddl#create-table) command:
+
+```sql
+-- local is the path-based catalog defined above
+CREATE TABLE local.db.table (id bigint, data string) USING iceberg
+```
+
+Iceberg catalogs support the full range of SQL DDL commands, including:
+
+* [`CREATE TABLE ... PARTITIONED BY`](../spark-ddl#create-table)
+* [`CREATE TABLE ... AS SELECT`](../spark-ddl#create-table--as-select)
+* [`ALTER TABLE`](../spark-ddl#alter-table)
+* [`DROP TABLE`](../spark-ddl#drop-table)
+
+### Writing
+
+Once your table is created, insert data using [`INSERT INTO`](../spark-writes#insert-into):
+
+```sql
+INSERT INTO local.db.table VALUES (1, 'a'), (2, 'b'), (3, 'c');
+INSERT INTO local.db.table SELECT id, data FROM source WHERE length(data) = 1;
+```
+
+Iceberg also adds row-level SQL updates to Spark, [`MERGE INTO`](../spark-writes#merge-into) and [`DELETE FROM`](../spark-writes#delete-from):
+
+```sql
+MERGE INTO local.db.target t USING (SELECT * FROM updates) u ON t.id = u.id
+WHEN MATCHED THEN UPDATE SET t.count = t.count + u.count
+WHEN NOT MATCHED THEN INSERT *
+```
+
+Iceberg supports writing DataFrames using the new [v2 DataFrame write API](../spark-writes#writing-with-dataframes):
+
+```scala
+spark.table("source").select("id", "data")
+     .writeTo("local.db.table").append()
+```
+
+The old `write` API is supported, but _not_ recommended.
+
+### Reading
+
+To read with SQL, use an Iceberg table name in a `SELECT` query:
+
+```sql
+SELECT count(1) as count, data
+FROM local.db.table
+GROUP BY data
+```
+
+SQL is also the recommended way to [inspect tables](../spark-queries#inspecting-tables). To view all of the snapshots in a table, use the `snapshots` metadata table:
+```sql
+SELECT * FROM local.db.table.snapshots
+```
+```
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+| committed_at            | snapshot_id    | parent_id | operation | manifest_list                                      | ... |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+| 2019-02-08 03:29:51.215 | 57897183625154 | null      | append    | s3://.../table/metadata/snap-57897183625154-1.avro | ... |
+|                         |                |           |           |                                                    | ... |
+|                         |                |           |           |                                                    | ... |
+| ...                     | ...            | ...       | ...       | ...                                                | ... |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-----+
+```
+
+[DataFrame reads](../spark-queries#querying-with-dataframes) are supported and can now reference tables by name using `spark.table`:
+
+```scala
+val df = spark.table("local.db.table")
+df.count()
+```
+
+### Next steps
+
+Next, you can learn more about Iceberg tables in Spark:
+
+* [DDL commands](../spark-ddl): `CREATE`, `ALTER`, and `DROP`
+* [Querying data](../spark-queries): `SELECT` queries and metadata tables
+* [Writing data](../spark-writes): `INSERT INTO` and `MERGE INTO`
+* [Maintaining tables](../spark-procedures) with stored procedures
diff --git a/docs/content/docs/spark/spark-procedures.md b/docs/content/docs/spark/spark-procedures.md
new file mode 100644
index 0000000..142aef1
--- /dev/null
+++ b/docs/content/docs/spark/spark-procedures.md
@@ -0,0 +1,458 @@
+---
+title: "Procedures"
+url: spark-procedures
+aliases:
+    - "spark/spark-procedures"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark Procedures
+
+To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration). Stored procedures are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.x.
+
+## Usage
+
+Procedures can be used from any configured Iceberg catalog with `CALL`. All procedures are in the namespace `system`.
+
+`CALL` supports passing arguments by name (recommended) or by position. Mixing position and named arguments is not supported.
+
+### Named arguments
+
+All procedure arguments are named. When passing arguments by name, arguments can be in any order and any optional argument can be omitted.
+
+```sql
+CALL catalog_name.system.procedure_name(arg_name_2 => arg_2, arg_name_1 => arg_1)
+```
+
+### Positional arguments
+
+When passing arguments by position, only the ending arguments may be omitted if they are optional.
+
+```sql
+CALL catalog_name.system.procedure_name(arg_1, arg_2, ... arg_n)
+```
+
+## Snapshot management
+
+### `rollback_to_snapshot`
+
+Roll back a table to a specific snapshot ID.
+
+To roll back to a specific time, use [`rollback_to_timestamp`](#rollback_to_timestamp).
+
+**Note** this procedure invalidates all cached Spark plans that reference the affected table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `snapshot_id` | ✔️  | long   | Snapshot ID to roll back to |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `previous_snapshot_id` | long | The current snapshot ID before the rollback |
+| `current_snapshot_id`  | long | The new current snapshot ID |
+
+#### Example
+
+Roll back table `db.sample` to snapshot ID `1`:
+
+```sql
+CALL catalog_name.system.rollback_to_snapshot('db.sample', 1)
+```
+
+### `rollback_to_timestamp`
+
+Roll back a table to the snapshot that was current at some time.
+
+**Note** this procedure invalidates all cached Spark plans that reference the affected table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `timestamp`   | ✔️  | timestamp | A timestamp to roll back to |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `previous_snapshot_id` | long | The current snapshot ID before the rollback |
+| `current_snapshot_id`  | long | The new current snapshot ID |
+
+#### Example
+
+Roll back `db.sample` to the snapshot that was current at a specific day and time:
+```sql
+CALL catalog_name.system.rollback_to_timestamp('db.sample', TIMESTAMP '2021-06-30 00:00:00.000')
+```
+
+### `set_current_snapshot`
+
+Sets the current snapshot ID for a table.
+
+Unlike rollback, the snapshot is not required to be an ancestor of the current table state.
+
+**Note** this procedure invalidates all cached Spark plans that reference the affected table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `snapshot_id` | ✔️  | long   | Snapshot ID to set as current |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `previous_snapshot_id` | long | The current snapshot ID before the rollback |
+| `current_snapshot_id`  | long | The new current snapshot ID |
+
+#### Example
+
+Set the current snapshot for `db.sample` to 1:
+```sql
+CALL catalog_name.system.set_current_snapshot('db.sample', 1)
+```
+
+### `cherrypick_snapshot`
+
+Cherry-picks changes from a snapshot into the current table state.
+
+Cherry-picking creates a new snapshot from an existing snapshot without altering or removing the original.
+
+Only append and dynamic overwrite snapshots can be cherry-picked.
+
+**Note** this procedure invalidates all cached Spark plans that reference the affected table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `snapshot_id` | ✔️  | long | The snapshot ID to cherry-pick |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `source_snapshot_id` | long | The table's current snapshot before the cherry-pick |
+| `current_snapshot_id` | long | The snapshot ID created by applying the cherry-pick |
+
+#### Examples
+
+Cherry-pick snapshot 1:
+```sql
+CALL catalog_name.system.cherrypick_snapshot('my_table', 1)
+```
+
+Cherry-pick snapshot 1 using named arguments:
+```sql
+CALL catalog_name.system.cherrypick_snapshot(snapshot_id => 1, table => 'my_table')
+```
+
+
+## Metadata management
+
+Many [maintenance actions](../maintenance) can be performed using Iceberg stored procedures.
+
+### `expire_snapshots`
+
+Each write/update/delete/upsert/compaction in Iceberg produces a new snapshot while keeping the old data and metadata
+around for snapshot isolation and time travel. The `expire_snapshots` procedure can be used to remove older snapshots
+and their files which are no longer needed.
+
+This procedure will remove old snapshots and data files which are uniquely required by those old snapshots. This means
+the `expire_snapshots` procedure will never remove files which are still required by a non-expired snapshot.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `older_than`  | ️   | timestamp | Timestamp before which snapshots will be removed (Default: 5 days ago) |
+| `retain_last` |    | int       | Number of ancestor snapshots to preserve regardless of `older_than` (defaults to 1) |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `deleted_data_files_count` | long | Number of data files deleted by this operation |
+| `deleted_manifest_files_count` | long | Number of manifest files deleted by this operation |
+| `deleted_manifest_lists_count` | long | Number of manifest list files deleted by this operation |
+
+#### Examples
+
+Remove snapshots older than a specific day and time, but retain the last 100 snapshots:
+
+```sql
+CALL hive_prod.system.expire_snapshots('db.sample', TIMESTAMP '2021-06-30 00:00:00.000', 100)
+```
+
+Erase all snapshots older than the current timestamp but retain the last 5 snapshots:
+
+```sql
+CALL hive_prod.system.expire_snapshots(table => 'db.sample', older_than => now(), retain_last => 5)
+```
+
+### `remove_orphan_files`
+
+Used to remove files which are not referenced in any metadata files of an Iceberg table and can thus be considered "orphaned".
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to clean |
+| `older_than`  | ️   | timestamp | Remove orphan files created before this timestamp (Defaults to 3 days ago) |
+| `location`    |    | string    | Directory to look for files in (defaults to the table's location) |
+| `dry_run`     |    | boolean   | When true, don't actually remove files (defaults to false) |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `orphan_file_location` | String | The path to each file determined to be an orphan by this command |
+
+#### Examples
+
+List all the files that are candidates for removal by performing a dry run of the `remove_orphan_files` command on this table without actually removing them:
+```sql
+CALL catalog_name.system.remove_orphan_files(table => 'db.sample', dry_run => true)
+```
+
+Remove any files in the `tablelocation/data` folder which are not known to the table `db.sample`.
+```sql
+CALL catalog_name.system.remove_orphan_files(table => 'db.sample', location => 'tablelocation/data')
+```
+
+### `rewrite_manifests`
+
+Rewrite manifests for a table to optimize scan planning.
+
+Data files in manifests are sorted by fields in the partition spec. This procedure runs in parallel using a Spark job.
+
+See the [`RewriteManifestsAction` Javadoc](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/actions/RewriteManifestsAction.html)
+for more configuration options.
+
+**Note** this procedure invalidates all cached Spark plans that reference the affected table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to update |
+| `use_caching` | ️   | boolean | Use Spark caching during operation (defaults to true) |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `rewritten_manifests_count` | int | Number of manifests which were re-written by this command |
+| `added_manifests_count`     | int | Number of new manifest files which were written by this command |
+
+#### Examples
+
+Rewrite the manifests in table `db.sample` and align manifest files with table partitioning.
+```sql
+CALL catalog_name.system.rewrite_manifests('db.sample')
+```
+
+Rewrite the manifests in table `db.sample` and disable the use of Spark caching. This could be done to avoid memory issues on executors.
+```sql
+CALL catalog_name.system.rewrite_manifests('db.sample', false)
+```
+
+## Table migration
+
+The `snapshot` and `migrate` procedures help test and migrate existing Hive or Spark tables to Iceberg.
+
+### `snapshot`
+
+Create a light-weight temporary copy of a table for testing, without changing the source table.
+
+The newly created table can be changed or written to without affecting the source table, but the snapshot uses the original table's data files.
+
+When inserts or overwrites run on the snapshot, new files are placed in the snapshot table's location rather than the original table location.
+
+When finished testing a snapshot table, clean it up by running `DROP TABLE`.
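+
+A minimal sketch, assuming a snapshot table named `db.snap` in the current catalog, as in the examples below:
+```sql
+DROP TABLE db.snap
+```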
+
+**Note** Because tables created by `snapshot` are not the sole owners of their data files, they are prohibited from
+actions like `expire_snapshots` which would physically delete data files. Iceberg deletes, which only affect metadata,
+are still allowed. In addition, any operations which affect the original data files will disrupt the snapshot table's
+integrity. DELETE statements executed against the original Hive table will remove original data files, and the
+`snapshot` table will no longer be able to access them.
+
+See [`migrate`](#migrate) to replace an existing table with an Iceberg table.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `source_table`| ✔️  | string | Name of the table to snapshot |
+| `table`       | ✔️  | string | Name of the new Iceberg table to create |
+| `location`    |    | string | Table location for the new table (delegated to the catalog by default) |
+| `properties`  | ️   | map<string, string> | Properties to add to the newly created table |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `imported_files_count` | long | Number of files added to the new table |
+
+#### Examples
+
+Create an isolated Iceberg table named `db.snap` that references table `db.sample`, at the
+catalog's default location for `db.snap`:
+```sql
+CALL catalog_name.system.snapshot('db.sample', 'db.snap')
+```
+
+Create an isolated Iceberg table named `db.snap` that references table `db.sample`, at
+a manually specified location `/tmp/temptable/`:
+```sql
+CALL catalog_name.system.snapshot('db.sample', 'db.snap', '/tmp/temptable/')
+```
+
+### `migrate`
+
+Replace a table with an Iceberg table, loaded with the source's data files.
+
+Table schema, partitioning, properties, and location will be copied from the source table.
+
+Migrate will fail if any table partition uses an unsupported format. Supported formats are Avro, Parquet, and ORC.
+Existing data files are added to the Iceberg table's metadata and can be read using a name-to-id mapping created from the original table schema.
+
+To leave the original table intact while testing, use [`snapshot`](#snapshot) to create a new temporary table that shares the source's data files and schema.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to migrate |
+| `properties`  | ️   | map<string, string> | Properties for the new Iceberg table |
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `migrated_files_count` | long | Number of files appended to the Iceberg table |
+
+#### Examples
+
+Migrate the table `db.sample` in Spark's default catalog to an Iceberg table and add a property 'foo' set to 'bar':
+
+```sql
+CALL catalog_name.system.migrate('spark_catalog.db.sample', map('foo', 'bar'))
+```
+
+Migrate `db.sample` in the current catalog to an Iceberg table without adding any additional properties:
+```sql
+CALL catalog_name.system.migrate('db.sample')
+```
+
+### `add_files`
+
+Attempts to directly add files from a Hive or file-based table into a given Iceberg table. Unlike `migrate` or
+`snapshot`, `add_files` can import files from specific partitions and does not create a new Iceberg table.
+This command creates metadata for the new files without moving them. It does not analyze the schema
+of the files to determine whether they actually match the schema of the Iceberg table. Upon completion, the Iceberg
+table treats these files as if they were part of the set of files owned by Iceberg. This means any subsequent
+`expire_snapshots` calls will be able to physically delete the added files. This method should not be used if
+`migrate` or `snapshot` are possible.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Table to which files will be added |
+| `source_table`| ✔️  | string | Table the files should come from; paths are also supported in the form \`file_format\`.\`path\` |
+| `partition_filter`  | ️   | map<string, string> | A map of partitions in the source table to import from |
+
+Warning: the schema is not validated; adding files with a different schema to the Iceberg table will cause issues.
+
+Warning: files added by this method can be physically deleted by Iceberg operations.
+
+#### Examples
+
+Add the files from table `db.src_tbl`, a Hive or Spark table registered in the session catalog, to the Iceberg table
+`db.tbl`. Only add files that exist within partitions where `part_col_1` is equal to `A`:
+```sql
+CALL spark_catalog.system.add_files(
+  table => 'db.tbl',
+  source_table => 'db.src_tbl',
+  partition_filter => map('part_col_1', 'A')
+)
+```
+
+Add files from a `parquet` file-based table at location `path/to/table` to the Iceberg table `db.tbl`. Add all
+files regardless of what partition they belong to:
+```sql
+CALL spark_catalog.system.add_files(
+  table => 'db.tbl',
+  source_table => '`parquet`.`path/to/table`'
+)
+```
+
+## Metadata information
+
+### `ancestors_of`
+
+Report the live snapshot IDs of parents of a specified snapshot.
+
+#### Usage
+
+| Argument Name | Required? | Type | Description |
+|---------------|-----------|------|-------------|
+| `table`       | ✔️  | string | Name of the table to report live snapshot IDs for |
+| `snapshot_id` |  ️  | long | Use a specified snapshot to get the live snapshot IDs of parents |
+
+> Tip: Using `snapshot_id`
+>
+> Given a snapshot history where the table was rolled back to B and then C' and D' were added:
+> ```shell
+> A -> B -> C -> D
+>       \ -> C' -> (D')
+> ```
+> Not specifying the snapshot ID would return A -> B -> C' -> D', while providing the snapshot ID of
+> D as an argument would return A -> B -> C -> D.
+
+#### Output
+
+| Output Name | Type | Description |
+| ------------|------|-------------|
+| `snapshot_id` | long | the ancestor snapshot id |
+| `timestamp` | long | snapshot creation time |
+
+#### Examples
+
+Get all the snapshot ancestors of the current snapshot (default):
+```sql
+CALL spark_catalog.system.ancestors_of('db.tbl')
+```
+
+Get all the snapshot ancestors of a particular snapshot:
+```sql
+CALL spark_catalog.system.ancestors_of('db.tbl', 1)
+CALL spark_catalog.system.ancestors_of(snapshot_id => 1, table => 'db.tbl')
+```
diff --git a/docs/content/docs/spark/spark-queries.md b/docs/content/docs/spark/spark-queries.md
new file mode 100644
index 0000000..76eca31
--- /dev/null
+++ b/docs/content/docs/spark/spark-queries.md
@@ -0,0 +1,265 @@
+---
+title: "Queries"
+url: spark-queries
+aliases:
+    - "spark/spark-queries"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark Queries
+
+To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration).
+
+Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
+
+| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
+|--------------------------------------------------|----------|------------|------------------------------------------------|
+| [`SELECT`](#querying-with-sql)                   | ✔️        |            |                                                |
+| [DataFrame reads](#querying-with-dataframes)     | ✔️        | ✔️          |                                                |
+| [Metadata table `SELECT`](#inspecting-tables)    | ✔️        |            |                                                |
+| [History metadata table](#history)               | ✔️        | ✔️          |                                                |
+| [Snapshots metadata table](#snapshots)           | ✔️        | ✔️          |                                                |
+| [Files metadata table](#files)                   | ✔️        | ✔️          |                                                |
+| [Manifests metadata table](#manifests)           | ✔️        | ✔️          |                                                |
+
+
+## Querying with SQL
+
+In Spark 3, tables use identifiers that include a [catalog name](../spark-configuration#using-catalogs).
+
+```sql
+SELECT * FROM prod.db.table -- catalog: prod, namespace: db, table: table
+```
+
+Metadata tables, like `history` and `snapshots`, can use the Iceberg table name as a namespace.
+
+For example, to read from the `files` metadata table for `prod.db.table`, run:
+
+```sql
+SELECT * FROM prod.db.table.files
+```
+
+## Querying with DataFrames
+
+To load a table as a DataFrame, use `table`:
+
+```scala
+val df = spark.table("prod.db.table")
+```
+
+### Catalogs with DataFrameReader
+
+Iceberg 0.11.0 adds multi-catalog support to `DataFrameReader` in both Spark 3.x and 2.4.
+
+Paths and table names can be loaded with Spark's `DataFrameReader` interface. How tables are loaded depends on how
+the identifier is specified. When using `spark.read.format("iceberg").load(table)` or `spark.table(table)`, the `table`
+variable can take a number of forms as listed below:
+
+*  `file:/path/to/table`: loads a HadoopTable at the given path
+*  `tablename`: loads `currentCatalog.currentNamespace.tablename`
+*  `catalog.tablename`: loads `tablename` from the specified catalog.
+*  `namespace.tablename`: loads `namespace.tablename` from current catalog
+*  `catalog.namespace.tablename`: loads `namespace.tablename` from the specified catalog.
+*  `namespace1.namespace2.tablename`: loads `namespace1.namespace2.tablename` from current catalog
+
+The above list is in order of priority. For example, a matching catalog will take priority over any namespace resolution.
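+
+For instance, under these resolution rules (a sketch; the path and table names here are hypothetical):
+
+```scala
+// loads a Hadoop table at an explicit path
+val pathDF = spark.read.format("iceberg").load("file:/tmp/warehouse/db/table")
+
+// loads `db.table` from the current catalog
+val nameDF = spark.read.format("iceberg").load("db.table")
+```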
+
+
+### Time travel
+
+To select a specific table snapshot or the snapshot at some time, Iceberg supports two Spark read options:
+
+* `snapshot-id` selects a specific table snapshot
+* `as-of-timestamp` selects the current snapshot at a timestamp, in milliseconds
+
+```scala
+// time travel to October 26, 1986 at 01:21:00
+spark.read
+    .option("as-of-timestamp", "499162860000")
+    .format("iceberg")
+    .load("path/to/table")
+```
+
+```scala
+// time travel to snapshot with ID 10963874102873L
+spark.read
+    .option("snapshot-id", 10963874102873L)
+    .format("iceberg")
+    .load("path/to/table")
+```
+
+{{< hint info >}}
+Spark does not currently support using `option` with `table` in DataFrameReader commands. All options will be silently 
+ignored. Do not use `table` when attempting to time-travel or use other options. Options will be supported with `table`
+in [Spark 3.1 - SPARK-32592](https://issues.apache.org/jira/browse/SPARK-32592).
+{{< /hint >}}
+
+Time travel is not yet supported by Spark's SQL syntax.
+
+### Spark 2.4
+
+Spark 2.4 requires using the DataFrame reader with `iceberg` as a format, because 2.4 does not support direct SQL queries:
+
+```scala
+// named metastore table
+spark.read.format("iceberg").load("catalog.db.table")
+// Hadoop path table
+spark.read.format("iceberg").load("hdfs://nn:8020/path/to/table")
+```
+
+#### Spark 2.4 with SQL
+
+To run SQL `SELECT` statements on Iceberg tables in 2.4, register the DataFrame as a temporary table:
+
+```scala
+val df = spark.read.format("iceberg").load("db.table")
+df.createOrReplaceTempView("table")
+
+spark.sql("""select count(1) from table""").show()
+```
+
+
+## Inspecting tables
+
+To inspect a table's history, snapshots, and other metadata, Iceberg supports metadata tables.
+
+Metadata tables are identified by adding the metadata table name after the original table name. For example, history for `db.table` is read using `db.table.history`.
+
+{{< hint info >}}
+As of Spark 3.0, the format of the table name for inspection (`catalog.database.table.metadata`) doesn't work with Spark's default catalog (`spark_catalog`). If you've replaced the default catalog, you may want to use the `DataFrameReader` API to inspect the table.
+{{< /hint >}}
+
+### History
+
+To show table history, run:
+
+```sql
+SELECT * FROM prod.db.table.history
+```
+```text
++-------------------------+---------------------+---------------------+---------------------+
+| made_current_at         | snapshot_id         | parent_id           | is_current_ancestor |
++-------------------------+---------------------+---------------------+---------------------+
+| 2019-02-08 03:29:51.215 | 5781947118336215154 | NULL                | true                |
+| 2019-02-08 03:47:55.948 | 5179299526185056830 | 5781947118336215154 | true                |
+| 2019-02-09 16:24:30.13  | 296410040247533544  | 5179299526185056830 | false               |
+| 2019-02-09 16:32:47.336 | 2999875608062437330 | 5179299526185056830 | true                |
+| 2019-02-09 19:42:03.919 | 8924558786060583479 | 2999875608062437330 | true                |
+| 2019-02-09 19:49:16.343 | 6536733823181975045 | 8924558786060583479 | true                |
++-------------------------+---------------------+---------------------+---------------------+
+```
+
+{{< hint info >}}
+**This shows a commit that was rolled back.** The example has two snapshots with the same parent, and one is *not* an ancestor of the current table state.
+{{< /hint >}}
+
+### Snapshots
+
+To show the valid snapshots for a table, run:
+
+```sql
+SELECT * FROM prod.db.table.snapshots
+```
+```text
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-------------------------------------------------------+
+| committed_at            | snapshot_id    | parent_id | operation | manifest_list                                      | summary                                               |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-------------------------------------------------------+
+| 2019-02-08 03:29:51.215 | 57897183625154 | null      | append    | s3://.../table/metadata/snap-57897183625154-1.avro | { added-records -> 2478404, total-records -> 2478404, |
+|                         |                |           |           |                                                    |   added-data-files -> 438, total-data-files -> 438,   |
+|                         |                |           |           |                                                    |   spark.app.id -> application_1520379288616_155055 }  |
+| ...                     | ...            | ...       | ...       | ...                                                | ...                                                   |
++-------------------------+----------------+-----------+-----------+----------------------------------------------------+-------------------------------------------------------+
+```
+
+You can also join snapshots to table history. For example, this query will show table history, with the application ID that wrote each snapshot:
+
+```sql
+select
+    h.made_current_at,
+    s.operation,
+    h.snapshot_id,
+    h.is_current_ancestor,
+    s.summary['spark.app.id']
+from prod.db.table.history h
+join prod.db.table.snapshots s
+  on h.snapshot_id = s.snapshot_id
+order by made_current_at
+```
+```text
++-------------------------+-----------+----------------+---------------------+----------------------------------+
+| made_current_at         | operation | snapshot_id    | is_current_ancestor | summary[spark.app.id]            |
++-------------------------+-----------+----------------+---------------------+----------------------------------+
+| 2019-02-08 03:29:51.215 | append    | 57897183625154 | true                | application_1520379288616_155055 |
+| 2019-02-09 16:24:30.13  | delete    | 29641004024753 | false               | application_1520379288616_151109 |
+| 2019-02-09 16:32:47.336 | append    | 57897183625154 | true                | application_1520379288616_155055 |
+| 2019-02-08 03:47:55.948 | overwrite | 51792995261850 | true                | application_1520379288616_152431 |
++-------------------------+-----------+----------------+---------------------+----------------------------------+
+```
+
+### Files
+
+To show a table's data files and each file's metadata, run:
+
+```sql
+SELECT * FROM prod.db.table.files
+```
+```text
++-------------------------------------------------------------------------+-------------+--------------+--------------------+--------------------+------------------+-------------------+------------------+-----------------+-----------------+--------------+---------------+
+| file_path                                                               | file_format | record_count | file_size_in_bytes | column_sizes       | value_counts     | null_value_counts | nan_value_counts | lower_bounds    | upper_bounds    | key_metadata | split_offsets |
++-------------------------------------------------------------------------+-------------+--------------+--------------------+--------------------+------------------+-------------------+------------------+-----------------+-----------------+--------------+---------------+
+| s3:/.../table/data/00000-3-8d6d60e8-d427-4809-bcf0-f5d45a4aad96.parquet | PARQUET     | 1            | 597                | [1 -> 90, 2 -> 62] | [1 -> 1, 2 -> 1] | [1 -> 0, 2 -> 0]  | []               | [1 -> , 2 -> c] | [1 -> , 2 -> c] | null         | [4]           |
+| s3:/.../table/data/00001-4-8d6d60e8-d427-4809-bcf0-f5d45a4aad96.parquet | PARQUET     | 1            | 597                | [1 -> 90, 2 -> 62] | [1 -> 1, 2 -> 1] | [1 -> 0, 2 -> 0]  | []               | [1 -> , 2 -> b] | [1 -> , 2 -> b] | null         | [4]           |
+| s3:/.../table/data/00002-5-8d6d60e8-d427-4809-bcf0-f5d45a4aad96.parquet | PARQUET     | 1            | 597                | [1 -> 90, 2 -> 62] | [1 -> 1, 2 -> 1] | [1 -> 0, 2 -> 0]  | []               | [1 -> , 2 -> a] | [1 -> , 2 -> a] | null         | [4]           |
++-------------------------------------------------------------------------+-------------+--------------+--------------------+--------------------+------------------+-------------------+------------------+-----------------+-----------------+--------------+---------------+
+```
+
+### Manifests
+
+To show a table's file manifests and each manifest's metadata, run:
+
+```sql
+SELECT * FROM prod.db.table.manifests
+```
+```text
++----------------------------------------------------------------------+--------+-------------------+---------------------+------------------------+---------------------------+--------------------------+--------------------------------------+
+| path                                                                 | length | partition_spec_id | added_snapshot_id   | added_data_files_count | existing_data_files_count | deleted_data_files_count | partition_summaries                  |
++----------------------------------------------------------------------+--------+-------------------+---------------------+------------------------+---------------------------+--------------------------+--------------------------------------+
+| s3://.../table/metadata/45b5290b-ee61-4788-b324-b1e2735c0e10-m0.avro | 4479   | 0                 | 6668963634911763636 | 8                      | 0                         | 0                        | [[false,null,2019-05-13,2019-05-15]] |
++----------------------------------------------------------------------+--------+-------------------+---------------------+------------------------+---------------------------+--------------------------+--------------------------------------+
+```
+
+Note: 
+1. Fields within the `partition_summaries` column of the manifests table correspond to `field_summary` structs within the [manifest list](../spec#manifest-lists), with the following order: 
+   - `contains_null`
+   - `contains_nan`
+   - `lower_bound`
+   - `upper_bound`
+2. `contains_nan` could return null, which indicates that this information is not available from the files' metadata.
+   This usually occurs when reading from a V1 table, where `contains_nan` is not populated.
+
+## Inspecting with DataFrames
+
+Metadata tables can be loaded in Spark 2.4 or Spark 3 using the DataFrameReader API:
+
+```scala
+// named metastore table
+spark.read.format("iceberg").load("db.table.files").show(truncate = false)
+// Hadoop path table
+spark.read.format("iceberg").load("hdfs://nn:8020/path/to/table#files").show(truncate = false)
+```
+
diff --git a/docs/content/docs/spark/spark-structured-streaming.md b/docs/content/docs/spark/spark-structured-streaming.md
new file mode 100644
index 0000000..8679847
--- /dev/null
+++ b/docs/content/docs/spark/spark-structured-streaming.md
@@ -0,0 +1,113 @@
+---
+title: "Structured Streaming"
+url: spark-structured-streaming
+aliases:
+    - "spark/spark-structured-streaming"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark Structured Streaming
+
+Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API
+with different levels of support in Spark versions.
+
+As of Spark 3.0, DataFrame reads and writes are supported.
+
+| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
+|--------------------------------------------------|----------|------------|------------------------------------------------|
+| [DataFrame write](#streaming-writes)             | ✔        | ✔          |                                                |
+
+## Streaming Writes
+
+To write values from a streaming query to an Iceberg table, use `DataStreamWriter`:
+
+```scala
+val tableIdentifier: String = ...
+data.writeStream
+    .format("iceberg")
+    .outputMode("append")
+    .trigger(Trigger.ProcessingTime(1, TimeUnit.MINUTES))
+    .option("path", tableIdentifier)
+    .option("checkpointLocation", checkpointPath)
+    .start()
+```
+
+The `tableIdentifier` can be:
+
+* The fully-qualified path to a HDFS table, like `hdfs://nn:8020/path/to/table`
+* A table name if the table is tracked by a catalog, like `database.table_name`
+
+Iceberg doesn't support "continuous processing", as it doesn't provide the interface to "commit" the output.
+
+Iceberg supports `append` and `complete` output modes:
+
+* `append`: appends the rows of every micro-batch to the table
+* `complete`: replaces the table contents every micro-batch
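+
+For example, a minimal sketch of `complete` mode, assuming `data` is a streaming DataFrame with a `category` column (complete mode requires an aggregated query):
+
+```scala
+data.groupBy("category").count()
+    .writeStream
+    .format("iceberg")
+    .outputMode("complete")
+    .trigger(Trigger.ProcessingTime(1, TimeUnit.MINUTES))
+    .option("path", tableIdentifier)
+    .option("checkpointLocation", checkpointPath)
+    .start()
+```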
+
+The table should be created prior to starting the streaming query. Refer to [SQL create table](../spark-ddl/#create-table)
+on the Spark page to see how to create the Iceberg table.
+
+### Writing against partitioned table
+
+Iceberg requires the data to be sorted according to the partition spec per task (Spark partition) prior to writing
+against a partitioned table. For batch queries you're encouraged to do an explicit sort to fulfill the requirement
+(see [here](../spark-writes/#writing-to-partitioned-tables)), but that approach adds latency because
+repartitioning and sorting are heavy operations for streaming workloads. To avoid the additional latency, you can
+enable the fanout writer to eliminate the requirement.
+
+```scala
+val tableIdentifier: String = ...
+data.writeStream
+    .format("iceberg")
+    .outputMode("append")
+    .trigger(Trigger.ProcessingTime(1, TimeUnit.MINUTES))
+    .option("path", tableIdentifier)
+    .option("fanout-enabled", "true")
+    .option("checkpointLocation", checkpointPath)
+    .start()
+```
+
+The fanout writer opens a file per partition value and doesn't close these files until the write task is finished.
+This functionality is discouraged for batch queries, as an explicit sort of the output rows isn't expensive for batch workloads.
+
+## Maintenance for streaming tables
+
+Streaming queries can create new table versions quickly, which creates lots of table metadata to track those versions.
+Maintaining metadata by tuning the rate of commits, expiring old snapshots, and automatically cleaning up metadata files
+is highly recommended.
+
+### Tune the rate of commits
+
+A high rate of commits produces lots of data files, manifests, and snapshots, which makes the table hard
+to maintain. We encourage using a trigger interval of at least 1 minute, and increasing the interval if needed.
+
+The triggers section in [Structured Streaming Programming Guide](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#triggers)
+documents how to configure the interval.
+
+### Expire old snapshots
+
+Each micro-batch written to a table produces a new snapshot. Snapshots are tracked in table metadata until they are expired, which removes the metadata and any data files that are no longer needed. Snapshots accumulate quickly with frequent commits, so it is highly recommended that tables written by streaming queries are [regularly maintained](../maintenance#expire-snapshots).
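+
+For example, snapshots can be expired with the `expire_snapshots` procedure (see the [Spark procedures](../spark-procedures) page); a sketch with placeholder catalog and table names:
+
+```sql
+CALL catalog_name.system.expire_snapshots(table => 'db.stream_table', older_than => now(), retain_last => 100)
+```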
+
+### Compacting data files
+
+The amount of data written in a micro batch is typically small, which can cause the table metadata to track lots of small files. [Compacting small files into larger files](../maintenance#compact-data-files) reduces the metadata needed by the table, and increases query efficiency.
+
+### Rewrite manifests
+
+To optimize write latency for streaming workloads, Iceberg may write new snapshots with a "fast" append that does not automatically compact manifests.
+This can lead to lots of small manifest files. Manifests can be [rewritten to optimize queries and to compact](../maintenance#rewrite-manifests).
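+
+For example, using the `rewrite_manifests` procedure (a sketch with placeholder catalog and table names):
+
+```sql
+CALL catalog_name.system.rewrite_manifests('db.stream_table')
+```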
diff --git a/docs/content/docs/spark/spark-writes.md b/docs/content/docs/spark/spark-writes.md
new file mode 100644
index 0000000..30c43aa
--- /dev/null
+++ b/docs/content/docs/spark/spark-writes.md
@@ -0,0 +1,457 @@
+---
+title: "Writes"
+url: spark-writes
+aliases:
+    - "spark/spark-writes"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Spark Writes
+
+To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration).
+
+Some plans are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.x.
+
+Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
+
+| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
+|--------------------------------------------------|----------|------------|------------------------------------------------|
+| [SQL insert into](#insert-into)                  | ✔️        |            |                                                |
+| [SQL merge into](#merge-into)                    | ✔️        |            | ⚠ Requires Iceberg Spark extensions            |
+| [SQL insert overwrite](#insert-overwrite)        | ✔️        |            |                                                |
+| [SQL delete from](#delete-from)                  | ✔️        |            | ⚠ Row-level delete requires Spark extensions   |
+| [SQL update](#update)                            | ✔️        |            | ⚠ Requires Iceberg Spark extensions            |
+| [DataFrame append](#appending-data)              | ✔️        | ✔️          |                                                |
+| [DataFrame overwrite](#overwriting-data)         | ✔️        | ✔️          | ⚠ Behavior changed in Spark 3.0                |
+| [DataFrame CTAS and RTAS](#creating-tables)      | ✔️        |            |                                                |
+
+
+## Writing with SQL
+
+Spark 3 supports SQL `INSERT INTO`, `MERGE INTO`, and `INSERT OVERWRITE`, as well as the new `DataFrameWriterV2` API.
+
+### `INSERT INTO`
+
+To append new data to a table, use `INSERT INTO`.
+
+```sql
+INSERT INTO prod.db.table VALUES (1, 'a'), (2, 'b')
+```
+```sql
+INSERT INTO prod.db.table SELECT ...
+```
+
+### `MERGE INTO`
+
+Spark 3 added support for `MERGE INTO` queries that can express row-level updates.
+
+Iceberg supports `MERGE INTO` by rewriting data files that contain rows that need to be updated in an `overwrite` commit.
+
+**`MERGE INTO` is recommended instead of `INSERT OVERWRITE`** because Iceberg can replace only the affected data files, and because the data overwritten by a dynamic overwrite may change if the table's partitioning changes.
+
+
+#### `MERGE INTO` syntax
+
+`MERGE INTO` updates a table, called the _target_ table, using a set of updates from another query, called the _source_. The update for a row in the target table is found using the `ON` clause that is like a join condition.
+
+```sql
+MERGE INTO prod.db.target t   -- a target table
+USING (SELECT ...) s          -- the source updates
+ON t.id = s.id                -- condition to find updates for target rows
+WHEN ...                      -- updates
+```
+
+Updates to rows in the target table are listed using `WHEN MATCHED ... THEN ...`. Multiple `MATCHED` clauses can be added with conditions that determine when each match should be applied. The first matching expression is used.
+
+```sql
+WHEN MATCHED AND s.op = 'delete' THEN DELETE
+WHEN MATCHED AND t.count IS NULL AND s.op = 'increment' THEN UPDATE SET t.count = 0
+WHEN MATCHED AND s.op = 'increment' THEN UPDATE SET t.count = t.count + 1
+```
+
+Source rows (updates) that do not match can be inserted:
+
+```sql
+WHEN NOT MATCHED THEN INSERT *
+```
+
+Inserts also support additional conditions:
+
+```sql
+WHEN NOT MATCHED AND s.event_time > still_valid_threshold THEN INSERT (id, count) VALUES (s.id, 1)
+```
+
+Only one record in the source data can update any given row of the target table, or else an error will be thrown.
+
+
+### `INSERT OVERWRITE`
+
+`INSERT OVERWRITE` can replace data in the table with the result of a query. Overwrites are atomic operations for Iceberg tables.
+
+The partitions that will be replaced by `INSERT OVERWRITE` depend on Spark's partition overwrite mode and the partitioning of a table. `MERGE INTO` can rewrite only affected data files and has more easily understood behavior, so it is recommended instead of `INSERT OVERWRITE`.
+
+{{< hint danger >}}
+Spark 3.0.0 has a correctness bug that affects dynamic `INSERT OVERWRITE` with hidden partitioning, [SPARK-32168][spark-32168].
+For tables with [hidden partitions](../partitioning), make sure you use Spark 3.0.1.
+{{< /hint >}}
+
+[spark-32168]: https://issues.apache.org/jira/browse/SPARK-32168
+
+
+#### Overwrite behavior
+
+Spark's default overwrite mode is **static**, but **dynamic overwrite mode is recommended when writing to Iceberg tables.** Static overwrite mode determines which partitions to overwrite in a table by converting the `PARTITION` clause to a filter, but the `PARTITION` clause can only reference table columns.
+
+Dynamic overwrite mode is configured by setting `spark.sql.sources.partitionOverwriteMode=dynamic`.
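+
+For example, the mode can be set for the current session from SQL (a sketch; it can also be set as a normal Spark configuration property when launching the job):
+
+```sql
+SET spark.sql.sources.partitionOverwriteMode=dynamic
+```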
+
+To demonstrate the behavior of dynamic and static overwrites, consider a `logs` table defined by the following DDL:
+
+```sql
+CREATE TABLE prod.my_app.logs (
+    uuid string NOT NULL,
+    level string NOT NULL,
+    ts timestamp NOT NULL,
+    message string)
+USING iceberg
+PARTITIONED BY (level, hours(ts))
+```
+
+#### Dynamic overwrite
+
+When Spark's overwrite mode is dynamic, partitions that have rows produced by the `SELECT` query will be replaced.
+
+For example, this query removes duplicate log events from the example `logs` table.
+
+```sql
+INSERT OVERWRITE prod.my_app.logs
+SELECT uuid, first(level), first(ts), first(message)
+FROM prod.my_app.logs
+WHERE cast(ts as date) = '2020-07-01'
+GROUP BY uuid
+```
+
+In dynamic mode, this will replace any partition with rows in the `SELECT` result. Because the date of all rows is restricted to 1 July, only hours of that day will be replaced.
+
+#### Static overwrite
+
+When Spark's overwrite mode is static, the `PARTITION` clause is converted to a filter that is used to delete from the table. If the `PARTITION` clause is omitted, all partitions will be replaced.
+
+Because there is no `PARTITION` clause in the query above, it will drop all existing rows in the table when run in static mode, but will only write the logs from 1 July.
+
+To overwrite just the partitions that were loaded, add a `PARTITION` clause that aligns with the `SELECT` query filter:
+
+```sql
+INSERT OVERWRITE prod.my_app.logs
+PARTITION (level = 'INFO')
+SELECT uuid, first(level), first(ts), first(message)
+FROM prod.my_app.logs
+WHERE level = 'INFO'
+GROUP BY uuid
+```
+
+Note that this mode cannot replace hourly partitions like the dynamic example query because the `PARTITION` clause can only reference table columns, not hidden partitions.
+
+### `DELETE FROM`
+
+Spark 3 added support for `DELETE FROM` queries to remove data from tables.
+
+Delete queries accept a filter to match rows to delete.
+
+```sql
+DELETE FROM prod.db.table
+WHERE ts >= '2020-05-01 00:00:00' and ts < '2020-06-01 00:00:00'
+
+DELETE FROM prod.db.all_events
+WHERE session_time < (SELECT min(session_time) FROM prod.db.good_events)
+
+DELETE FROM prod.db.orders AS t1
+WHERE EXISTS (SELECT oid FROM prod.db.returned_orders WHERE t1.oid = oid)
+```
+
+If the delete filter matches entire partitions of the table, Iceberg will perform a metadata-only delete. If the filter matches individual rows of a table, then Iceberg will rewrite only the affected data files.
+
+### `UPDATE`
+
+Spark 3.1 added support for `UPDATE` queries that update matching rows in tables.
+
+Update queries accept a filter to match rows to update.
+
+```sql
+UPDATE prod.db.table
+SET c1 = 'update_c1', c2 = 'update_c2'
+WHERE ts >= '2020-05-01 00:00:00' and ts < '2020-06-01 00:00:00'
+
+UPDATE prod.db.all_events
+SET session_time = 0, ignored = true
+WHERE session_time < (SELECT min(session_time) FROM prod.db.good_events)
+
+UPDATE prod.db.orders AS t1
+SET order_status = 'returned'
+WHERE EXISTS (SELECT oid FROM prod.db.returned_orders WHERE t1.oid = oid)
+```
+
+For more complex row-level updates based on incoming data, see the section on `MERGE INTO`.
+
+## Writing with DataFrames
+
+Spark 3 introduced the new `DataFrameWriterV2` API for writing to tables using data frames. The v2 API is recommended for several reasons:
+
+* CTAS, RTAS, and overwrite by filter are supported
+* All operations consistently write columns to a table by name
+* Hidden partition expressions are supported in `partitionedBy`
+* Overwrite behavior is explicit, either dynamic or by a user-supplied filter
+* The behavior of each operation corresponds to SQL statements
+    - `df.writeTo(t).create()` is equivalent to `CREATE TABLE AS SELECT`
+    - `df.writeTo(t).replace()` is equivalent to `REPLACE TABLE AS SELECT`
+    - `df.writeTo(t).append()` is equivalent to `INSERT INTO`
+    - `df.writeTo(t).overwritePartitions()` is equivalent to dynamic `INSERT OVERWRITE`
+
+The v1 DataFrame `write` API is still supported, but is not recommended.
+
+{{< hint danger >}}
+When writing with the v1 DataFrame API in Spark 3, use `saveAsTable` or `insertInto` to load tables with a catalog.
+Using `format("iceberg")` loads an isolated table reference that will not automatically refresh tables used by queries.
+{{< /hint >}}
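+
+A minimal sketch of appending through the session catalog with the v1 API:
+
+```scala
+// appends via the catalog, avoiding an isolated format("iceberg") table reference
+data.write.mode("append").insertInto("prod.db.table")
+```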
+
+
+### Appending data
+
+To append a dataframe to an Iceberg table, use `append`:
+
+```scala
+val data: DataFrame = ...
+data.writeTo("prod.db.table").append()
+```
+
+#### Spark 2.4
+
+In Spark 2.4, use the v1 API with `append` mode and `iceberg` format:
+
+```scala
+data.write
+    .format("iceberg")
+    .mode("append")
+    .save("db.table")
+```
+
+### Overwriting data
+
+To overwrite partitions dynamically, use `overwritePartitions()`:
+
+```scala
+val data: DataFrame = ...
+data.writeTo("prod.db.table").overwritePartitions()
+```
+
+To explicitly overwrite partitions, use `overwrite` to supply a filter:
+
+```scala
+data.writeTo("prod.db.table").overwrite($"level" === "INFO")
+```
+
+#### Spark 2.4
+
+In Spark 2.4, overwrite values in an Iceberg table with `overwrite` mode and `iceberg` format:
+
+```scala
+data.write
+    .format("iceberg")
+    .mode("overwrite")
+    .save("db.table")
+```
+
+{{< hint danger >}}
+**The behavior of overwrite mode changed between Spark 2.4 and Spark 3**.
+{{< /hint >}}
+
+The behavior of DataFrameWriter overwrite mode was undefined in Spark 2.4, but is required to overwrite the entire table in Spark 3. Because of this new requirement, the Iceberg source's behavior changed in Spark 3. In Spark 2.4, the behavior was to dynamically overwrite partitions. To use the Spark 2.4 behavior, add option `overwrite-mode=dynamic`.
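+
+A sketch of adding that option with the v1 API (option name taken from the paragraph above):
+
+```scala
+data.write
+    .format("iceberg")
+    .option("overwrite-mode", "dynamic")
+    .mode("overwrite")
+    .save("db.table")
+```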
+
+### Creating tables
+
+To run a CTAS or RTAS, use `create`, `replace`, or `createOrReplace` operations:
+
+```scala
+val data: DataFrame = ...
+data.writeTo("prod.db.table").create()
+```
+
+Create and replace operations support table configuration methods, like `partitionedBy` and `tableProperty`:
+
+```scala
+data.writeTo("prod.db.table")
+    .tableProperty("write.format.default", "orc")
+    .partitionedBy($"level", days($"ts"))
+    .createOrReplace()
+```
+
+## Writing to partitioned tables
+
+Iceberg requires the data to be sorted according to the partition spec per task (Spark partition) prior to writing
+against a partitioned table. This applies to both writing with SQL and writing with DataFrames.
+
+{{< hint info >}}
+Explicit sort is necessary because Spark doesn't allow Iceberg to request a sort before writing as of Spark 3.0.
+[SPARK-23889](https://issues.apache.org/jira/browse/SPARK-23889) is filed to enable Iceberg to require specific
+distribution & sort order to Spark.
+{{< /hint >}}
+
+{{< hint info >}}
+Both global sort (`orderBy`/`sort`) and local sort (`sortWithinPartitions`) work for the requirement.
+{{< /hint >}}
+
+Let's go through writing data to the sample table below:
+
+```sql
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string,
+    ts timestamp)
+USING iceberg
+PARTITIONED BY (days(ts), category)
+```
+
+To write data to the sample table, your data needs to be sorted by `days(ts), category`.
+
+If you're inserting data with a SQL statement, you can use `ORDER BY` to achieve this, like below:
+
+```sql
+INSERT INTO prod.db.sample
+SELECT id, data, category, ts FROM another_table
+ORDER BY ts, category
+```
+
+If you're inserting data with a DataFrame, you can use either `orderBy`/`sort` to trigger a global sort, or `sortWithinPartitions`
+to trigger a local sort. For example, with a local sort:
+
+```scala
+data.sortWithinPartitions("ts", "category")
+    .writeTo("prod.db.sample")
+    .append()
+```
+
+You can simply add the original column to the sort condition for most partition transformations, except `bucket`.
+
+For the `bucket` partition transformation, you need to register the Iceberg transform function in Spark to specify it during the sort.
+
+Let's go through another sample table with a bucket partition:
+
+```sql
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string,
+    ts timestamp)
+USING iceberg
+PARTITIONED BY (bucket(16, id))
+```
+
+You need to register a function to compute the bucket, like below:
+
+```scala
+import org.apache.iceberg.spark.IcebergSpark
+import org.apache.spark.sql.types.DataTypes
+
+IcebergSpark.registerBucketUDF(spark, "iceberg_bucket16", DataTypes.LongType, 16)
+```
+
+{{< hint info >}}
+Explicit registration of the function is necessary because Spark doesn't allow Iceberg to provide functions.
+[SPARK-27658](https://issues.apache.org/jira/browse/SPARK-27658) is filed to enable Iceberg to provide functions
+which can be used in query.
+{{< /hint >}}
+
+Here we registered the bucket function as `iceberg_bucket16`, which can be used in the sort clause.
+
+If you're inserting data with a SQL statement, you can use the function like below:
+
+```sql
+INSERT INTO prod.db.sample
+SELECT id, data, category, ts FROM another_table
+ORDER BY iceberg_bucket16(id)
+```
+
+If you're inserting data with a DataFrame, you can use the function like below:
+
+```scala
+import org.apache.spark.sql.functions.expr
+
+data.sortWithinPartitions(expr("iceberg_bucket16(id)"))
+    .writeTo("prod.db.sample")
+    .append()
+```
+
+
+## Type compatibility
+
+Spark and Iceberg support different sets of types. Iceberg does the type conversion automatically, but not for all combinations,
+so you may want to understand the type conversion in Iceberg prior to designing the types of columns in your tables.
+
+### Spark type to Iceberg type
+
+This type conversion table describes how Spark types are converted to Iceberg types. The conversion applies both when creating an Iceberg table and when writing to an Iceberg table via Spark.
+
+| Spark           | Iceberg                 | Notes |
+|-----------------|-------------------------|-------|
+| boolean         | boolean                 |       |
+| short           | integer                 |       |
+| byte            | integer                 |       |
+| integer         | integer                 |       |
+| long            | long                    |       |
+| float           | float                   |       |
+| double          | double                  |       |
+| date            | date                    |       |
+| timestamp       | timestamp with timezone |       |
+| char            | string                  |       |
+| varchar         | string                  |       |
+| string          | string                  |       |
+| binary          | binary                  |       |
+| decimal         | decimal                 |       |
+| struct          | struct                  |       |
+| array           | list                    |       |
+| map             | map                     |       |
+
+{{< hint info >}}
+The table above describes the conversions applied when creating a table. In fact, broader conversions are supported on write. Here are some points on write:
+
+* Iceberg numeric types (`integer`, `long`, `float`, `double`, `decimal`) support promotion during writes. e.g. You can write Spark types `short`, `byte`, `integer`, `long` to Iceberg type `long`.
+* You can write to Iceberg `fixed` type using Spark `binary` type. Note that assertion on the length will be performed.
+{{< /hint >}}
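+
+For instance, a sketch of numeric promotion on write, assuming a hypothetical table `prod.db.sample_longs` with a single `long` column `id`:
+
+```scala
+import org.apache.spark.sql.functions.col
+
+// a Spark integer column can be written to an Iceberg long column
+spark.range(3)
+    .select(col("id").cast("int").as("id"))
+    .writeTo("prod.db.sample_longs")
+    .append()
+```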
+
+### Iceberg type to Spark type
+
+This type conversion table describes how Iceberg types are converted to Spark types. The conversion applies when reading from an Iceberg table via Spark.
+
+| Iceberg                    | Spark                   | Note          |
+|----------------------------|-------------------------|---------------|
+| boolean                    | boolean                 |               |
+| integer                    | integer                 |               |
+| long                       | long                    |               |
+| float                      | float                   |               |
+| double                     | double                  |               |
+| date                       | date                    |               |
+| time                       |                         | Not supported |
+| timestamp with timezone    | timestamp               |               |
+| timestamp without timezone |                         | Not supported |
+| string                     | string                  |               |
+| uuid                       | string                  |               |
+| fixed                      | binary                  |               |
+| binary                     | binary                  |               |
+| decimal                    | decimal                 |               |
+| struct                     | struct                  |               |
+| list                       | array                   |               |
+| map                        | map                     |               |
+
diff --git a/docs/content/docs/tables/_index.md b/docs/content/docs/tables/_index.md
new file mode 100644
index 0000000..033a32a
--- /dev/null
+++ b/docs/content/docs/tables/_index.md
@@ -0,0 +1,5 @@
+---
+bookIconFa: fa-table
+bookFlatSection: true
+weight: 100
+---
\ No newline at end of file
diff --git a/docs/content/docs/tables/configuration.md b/docs/content/docs/tables/configuration.md
new file mode 100644
index 0000000..8c6c32f
--- /dev/null
+++ b/docs/content/docs/tables/configuration.md
@@ -0,0 +1,131 @@
+---
+url: configuration
+aliases:
+    - "tables/configuration"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Configuration
+
+## Table properties
+
+Iceberg tables support table properties to configure table behavior, like the default split size for readers.
+
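+Table properties can be set when creating a table or updated on an existing table. A minimal sketch using Iceberg's Java `UpdateProperties` API (the property values here are only examples; see the tables below for the available properties):
+
+```java
+Table table = ...;  // load the table from a catalog first
+table.updateProperties()
+    .set("write.format.default", "orc")
+    .set("read.split.target-size", "268435456")  // 256 MB
+    .commit();
+```
+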
+### Read properties
+
+| Property                          | Default            | Description                                            |
+| --------------------------------- | ------------------ | ------------------------------------------------------ |
+| read.split.target-size            | 134217728 (128 MB) | Target size when combining data input splits           |
+| read.split.metadata-target-size   | 33554432 (32 MB)   | Target size when combining metadata input splits       |
+| read.split.planning-lookback      | 10                 | Number of bins to consider when combining input splits |
+| read.split.open-file-cost         | 4194304 (4 MB)     | The estimated cost to open a file, used as a minimum weight when combining splits. |
+
+### Write properties
+
+| Property                           | Default            | Description                                        |
+| ---------------------------------- | ------------------ | -------------------------------------------------- |
+| write.format.default               | parquet            | Default file format for the table; parquet, avro, or orc |
+| write.parquet.row-group-size-bytes | 134217728 (128 MB) | Parquet row group size                             |
+| write.parquet.page-size-bytes      | 1048576 (1 MB)     | Parquet page size                                  |
+| write.parquet.dict-size-bytes      | 2097152 (2 MB)     | Parquet dictionary page size                       |
+| write.parquet.compression-codec    | gzip               | Parquet compression codec                          |
+| write.parquet.compression-level    | null               | Parquet compression level                          |
+| write.avro.compression-codec       | gzip               | Avro compression codec                             |
+| write.location-provider.impl       | null               | Optional custom implementation for LocationProvider  |
+| write.metadata.compression-codec   | none               | Metadata compression codec; none or gzip           |
+| write.metadata.metrics.default     | truncate(16)       | Default metrics mode for all columns in the table; none, counts, truncate(length), or full |
+| write.metadata.metrics.column.col1 | (not set)          | Metrics mode for column 'col1' to allow per-column tuning; none, counts, truncate(length), or full |
+| write.target-file-size-bytes       | 536870912 (512 MB) | Controls the size of files generated to target about this many bytes |
+| write.distribution-mode            | none               | Defines distribution of write data: __none__: don't shuffle rows; __hash__: hash distribute by partition key; __range__: range distribute by partition key or sort key if the table has a SortOrder |
+| write.wap.enabled                  | false              | Enables write-audit-publish writes |
+| write.summary.partition-limit      | 0                  | Includes partition-level summary stats in snapshot summaries if the changed partition count is less than this limit |
+| write.metadata.delete-after-commit.enabled | false      | Controls whether to delete the oldest version metadata files after commit |
+| write.metadata.previous-versions-max       | 100        | The max number of previous version metadata files to keep before deleting after commit |
+| write.spark.fanout.enabled       | false        | Enables Partitioned-Fanout-Writer writes in Spark |
+
+### Table behavior properties
+
+| Property                           | Default          | Description                                                   |
+| ---------------------------------- | ---------------- | ------------------------------------------------------------- |
+| commit.retry.num-retries           | 4                | Number of times to retry a commit before failing              |
+| commit.retry.min-wait-ms           | 100              | Minimum time in milliseconds to wait before retrying a commit |
+| commit.retry.max-wait-ms           | 60000 (1 min)    | Maximum time in milliseconds to wait before retrying a commit |
+| commit.retry.total-timeout-ms      | 1800000 (30 min) | Total retry timeout period in milliseconds for a commit |
+| commit.status-check.num-retries    | 3                | Number of times to check whether a commit succeeded after a connection is lost before failing due to an unknown commit state |
+| commit.status-check.min-wait-ms    | 1000 (1s)        | Minimum time in milliseconds to wait before retrying a status-check |
+| commit.status-check.max-wait-ms    | 60000 (1 min)    | Maximum time in milliseconds to wait before retrying a status-check |
+| commit.status-check.total-timeout-ms | 1800000 (30 min) | Total timeout period in milliseconds for the commit status-check to succeed |
+| commit.manifest.target-size-bytes  | 8388608 (8 MB)   | Target size when merging manifest files                       |
+| commit.manifest.min-count-to-merge | 100              | Minimum number of manifests to accumulate before merging      |
+| commit.manifest-merge.enabled      | true             | Controls whether to automatically merge manifests on writes   |
+| history.expire.max-snapshot-age-ms | 432000000 (5 days) | Default max age of snapshots to keep while expiring snapshots    |
+| history.expire.min-snapshots-to-keep | 1                | Default min number of snapshots to keep while expiring snapshots |
+
+### Compatibility flags
+
+| Property                                      | Default  | Description                                                   |
+| --------------------------------------------- | -------- | ------------------------------------------------------------- |
+| compatibility.snapshot-id-inheritance.enabled | false    | Enables committing snapshots without explicit snapshot IDs    |
+
+## Catalog properties
+
+Iceberg catalogs support using catalog properties to configure catalog behaviors. Here is a list of commonly used catalog properties:
+
+| Property                          | Default            | Description                                            |
+| --------------------------------- | ------------------ | ------------------------------------------------------ |
+| catalog-impl                      | null               | a custom `Catalog` implementation to use by an engine  |
+| io-impl                           | null               | a custom `FileIO` implementation to use in a catalog   |
+| warehouse                         | null               | the root path of the data warehouse                    |
+| uri                               | null               | a URI string, such as Hive metastore URI               |
+| clients                           | 2                  | client pool size                                       |
+
+`HadoopCatalog` and `HiveCatalog` can access the properties in their constructors.
+Any other custom catalog can access the properties by implementing `Catalog.initialize(catalogName, catalogProperties)`.
+The properties can be manually constructed or passed in from a compute engine like Spark or Flink.
+Spark uses its session properties as catalog properties; see more details in the [Spark configuration](../spark-configuration#catalog-configuration) section.
+Flink passes in catalog properties through the `CREATE CATALOG` statement; see more details in the [Flink](../flink/#creating-catalogs-and-using-catalogs) section.
+
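+When constructing the properties manually in Java, one way to load a catalog with them is through `CatalogUtil` (a minimal sketch; the metastore URI and warehouse path are hypothetical):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.CatalogUtil;
+import org.apache.iceberg.catalog.Catalog;
+
+Map<String, String> properties = new HashMap<>();
+properties.put("uri", "thrift://metastore-host:9083");    // hypothetical metastore URI
+properties.put("warehouse", "s3://my-bucket/warehouse");  // hypothetical warehouse path
+properties.put("clients", "5");
+
+Catalog catalog = CatalogUtil.loadCatalog(
+    "org.apache.iceberg.hive.HiveCatalog", "my_catalog", properties, new Configuration());
+```
+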
+### Lock catalog properties
+
+Here are the catalog properties related to locking. They are used by some catalog implementations to control the locking behavior during commits.
+
+| Property                          | Default            | Description                                            |
+| --------------------------------- | ------------------ | ------------------------------------------------------ |
+| lock-impl                         | null               | a custom implementation of the lock manager, the actual interface depends on the catalog used  |
+| lock.table                        | null               | an auxiliary table for locking, such as in [AWS DynamoDB lock manager](../aws/#dynamodb-for-commit-locking)  |
+| lock.acquire-interval-ms          | 5 seconds          | the interval to wait between each attempt to acquire a lock  |
+| lock.acquire-timeout-ms           | 3 minutes          | the maximum time to try acquiring a lock               |
+| lock.heartbeat-interval-ms        | 3 seconds          | the interval to wait between each heartbeat after acquiring a lock  |
+| lock.heartbeat-timeout-ms         | 15 seconds         | the maximum time without a heartbeat to consider a lock expired  |
+
+
+## Hadoop configuration
+
+The following properties from the Hadoop configuration are used by the Hive Metastore connector.
+
+| Property                              | Default          | Description                                                                        |
+| ------------------------------------- | ---------------- | ---------------------------------------------------------------------------------- |
+| iceberg.hive.client-pool-size         | 5                | The size of the Hive client pool when tracking tables in HMS                       |
+| iceberg.hive.lock-timeout-ms          | 180000 (3 min)   | Maximum time in milliseconds to acquire a lock                                     |
+| iceberg.hive.lock-check-min-wait-ms   | 50               | Minimum time in milliseconds to check back on the status of lock acquisition       |
+| iceberg.hive.lock-check-max-wait-ms   | 5000             | Maximum time in milliseconds to check back on the status of lock acquisition       |
+
+Note: `iceberg.hive.lock-check-max-wait-ms` should be less than the [transaction timeout](https://cwiki.apache.org/confluence/display/Hive/Configuration+Properties#ConfigurationProperties-hive.txn.timeout) 
+of the Hive Metastore (`hive.txn.timeout` or `metastore.txn.timeout` in the newer versions). Otherwise, the heartbeats on the lock (which happen during the lock checks) would end up expiring in the
+Hive Metastore before the lock is retried from Iceberg.
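+
+These keys are read from the Hadoop configuration. A minimal sketch of setting them programmatically (the values shown are arbitrary; they can also be provided through the cluster's Hadoop configuration files):
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+Configuration conf = new Configuration();
+conf.setInt("iceberg.hive.client-pool-size", 10);
+conf.setLong("iceberg.hive.lock-timeout-ms", 360000); // 6 min
+```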
+
diff --git a/docs/content/docs/tables/evolution.md b/docs/content/docs/tables/evolution.md
new file mode 100644
index 0000000..c47c8b4
--- /dev/null
+++ b/docs/content/docs/tables/evolution.md
@@ -0,0 +1,103 @@
+---
+url: evolution
+aliases:
+    - "tables/evolution"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+ 
+# Evolution
+
+Iceberg supports **in-place table evolution**. You can [evolve a table schema](#schema-evolution) just like SQL -- even in nested structures -- or [change partition layout](#partition-evolution) when data volume changes. Iceberg does not require costly distractions, like rewriting table data or migrating to a new table.
+
+For example, Hive table partitioning cannot change, so moving from a daily partition layout to an hourly partition layout requires a new table. And because queries are dependent on partitions, queries must be rewritten for the new table. In some cases, even changes as simple as renaming a column are either not supported, or can cause [data correctness](#correctness) problems.
+
+## Schema evolution
+
+Iceberg supports the following schema evolution changes:
+
+* **Add** -- add a new column to the table or to a nested struct
+* **Drop** -- remove an existing column from the table or a nested struct
+* **Rename** -- rename an existing column or field in a nested struct
+* **Update** -- widen the type of a column, struct field, map key, map value, or list element
+* **Reorder** -- change the order of columns or fields in a nested struct
+
+Iceberg schema updates are **metadata changes**, so no data files need to be rewritten to perform the update.
+
+Note that map keys do not support adding or dropping struct fields that would change equality.
+
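+As a sketch, these changes are applied through the Java API's `UpdateSchema` (the column names below are hypothetical):
+
+```java
+Table sampleTable = ...;
+sampleTable.updateSchema()
+    .addColumn("region", Types.StringType.get())          // add
+    .renameColumn("data", "payload")                      // rename
+    .updateColumn("measurement", Types.DoubleType.get())  // widen float to double
+    .deleteColumn("category")                             // drop
+    .commit();
+```
+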
+### Correctness
+
+Iceberg guarantees that **schema evolution changes are independent and free of side-effects**, without rewriting files:
+
+1.  Added columns never read existing values from another column.
+2.  Dropping a column or field does not change the values in any other column.
+3.  Updating a column or field does not change values in any other column.
+4.  Changing the order of columns or fields in a struct does not change the values associated with a column or field name.
+
+Iceberg uses unique IDs to track each column in a table. When you add a column, it is assigned a new ID so existing data is never used by mistake.
+
+* Formats that track columns by name can inadvertently un-delete a column if a name is reused, which violates #1.
+* Formats that track columns by position cannot delete columns without changing the names that are used for each column, which violates #2.
+
+
+## Partition evolution
+
+Iceberg table partitioning can be updated in an existing table because queries do not reference partition values directly.
+
+When you evolve a partition spec, the old data written with an earlier spec remains unchanged. New data is written using the new spec in a new layout. Metadata for each of the partition versions is kept separately. Because of this, query planning is split: each partition layout plans its files separately, using the filter it derives for that specific partition layout. Here's a visual representation of a contrived example:
+
+![Partition evolution diagram](../img/partition-spec-evolution.png)
+*The data for 2008 is partitioned by month. Starting from 2009 the table is updated so that the data is instead partitioned by day. Both partitioning layouts are able to coexist in the same table.*
+
+Iceberg uses [hidden partitioning](../partitioning), so you don't *need* to write queries for a specific partition layout to be fast. Instead, you can write queries that select the data you need, and Iceberg automatically prunes out files that don't contain matching data.
+
+Partition evolution is a metadata operation and does not eagerly rewrite files.
+
+Iceberg's Java table API provides the `updateSpec` API to update the partition spec.
+For example, the following code could be used to add a new partition field that places `id` column values into 8 buckets and to remove the existing partition field `category`:
+
+```java
+Table sampleTable = ...;
+sampleTable.updateSpec()
+    .addField(bucket("id", 8))
+    .removeField("category")
+    .commit();
+```
+
+Spark supports updating the partition spec through its `ALTER TABLE` SQL statement; see more details in [Spark SQL](../spark-ddl/#alter-table--add-partition-field).
+
+## Sort order evolution
+
+Similar to partition spec, Iceberg sort order can also be updated in an existing table.
+When you evolve a sort order, the old data written with an earlier order remains unchanged.
+Engines can always choose to write data in the latest sort order or unsorted when sorting is prohibitively expensive.
+
+Iceberg's Java table API provides the `replaceSortOrder` API to update the sort order.
+For example, the following code could be used to create a new sort order
+with the `id` column sorted in ascending order with nulls last,
+and the `category` column sorted in descending order with nulls first:
+
+```java
+Table sampleTable = ...;
+sampleTable.replaceSortOrder()
+   .asc("id", NullOrder.NULLS_LAST)
+   .desc("category", NullOrder.NULLS_FIRST)
+   .commit();
+```
+
+Spark supports updating the sort order through its `ALTER TABLE` SQL statement; see more details in [Spark SQL](../spark-ddl/#alter-table--write-ordered-by).
diff --git a/docs/content/docs/tables/maintenance.md b/docs/content/docs/tables/maintenance.md
new file mode 100644
index 0000000..21e49fd
--- /dev/null
+++ b/docs/content/docs/tables/maintenance.md
@@ -0,0 +1,149 @@
+---
+url: maintenance
+aliases:
+    - "tables/maintenance"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Maintenance
+
+{{< hint info >}}
+Maintenance operations require a `Table` instance. Please refer to the [Java API quickstart](../java-api-quickstart/#create-a-table) page to see how to load an existing table.
+{{< /hint >}}
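+
+For example, a `Table` instance can be loaded from a Hadoop-compatible path (a minimal sketch; catalogs such as Hive work similarly, and the path here is hypothetical):
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.hadoop.HadoopTables;
+
+Table table = new HadoopTables(new Configuration()).load("hdfs://nn:8020/warehouse/db/table");
+```
+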
+## Recommended Maintenance
+
+### Expire Snapshots
+
+Each write to an Iceberg table creates a new _snapshot_, or version, of a table. Snapshots can be used for time-travel queries, or the table can be rolled back to any valid snapshot.
+
+Snapshots accumulate until they are expired by the [`expireSnapshots`](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/Table.html#expireSnapshots--) operation. Regularly expiring snapshots is recommended to delete data files that are no longer needed, and to keep the size of table metadata small.
+
+This example expires snapshots that are older than 1 day:
+
+```java
+Table table = ...
+long tsToExpire = System.currentTimeMillis() - (1000 * 60 * 60 * 24); // 1 day
+table.expireSnapshots()
+     .expireOlderThan(tsToExpire)
+     .commit();
+```
+
+See the [`ExpireSnapshots` Javadoc](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/ExpireSnapshots.html) to see more configuration options.
+
+There is also a Spark action that can run table expiration in parallel for large tables:
+
+```java
+Actions.forTable(table)
+    .expireSnapshots()
+    .expireOlderThan(tsToExpire)
+    .execute();
+```
+
+Expiring old snapshots removes them from metadata, so they are no longer available for time travel queries.
+
+{{< hint info >}}
+Data files are not deleted until they are no longer referenced by a snapshot that may be used for time travel or rollback.
+Regularly expiring snapshots deletes unused data files.
+{{< /hint >}}
+
+### Remove old metadata files
+
+Iceberg keeps track of table metadata using JSON files. Each change to a table produces a new metadata file to provide atomicity.
+
+Old metadata files are kept for history by default. Tables with frequent commits, like those written by streaming jobs, may need to regularly clean metadata files.
+
+To automatically clean metadata files, set `write.metadata.delete-after-commit.enabled=true` in table properties. This will keep some metadata files (up to `write.metadata.previous-versions-max`) and will delete the oldest metadata file after each new one is created.
+
+| Property                                     | Description                                                  |
+| -------------------------------------------- | ------------------------------------------------------------ |
+| `write.metadata.delete-after-commit.enabled` | Whether to delete old metadata files after each table commit |
+| `write.metadata.previous-versions-max`       | The number of old metadata files to keep                     |
+
+See [table write properties](../configuration/#write-properties) for more details.
+
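+A minimal sketch of enabling automatic metadata cleanup on an existing table through the Java API (the limit of 10 previous versions is arbitrary):
+
+```java
+Table table = ...;
+table.updateProperties()
+    .set("write.metadata.delete-after-commit.enabled", "true")
+    .set("write.metadata.previous-versions-max", "10")
+    .commit();
+```
+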
+### Remove orphan files
+
+In Spark and other distributed processing engines, task or job failures can leave files that are not referenced by table metadata, and in some cases normal snapshot expiration may not be able to determine that a file is no longer needed and delete it.
+
+To clean up these "orphan" files under a table location, use the `removeOrphanFiles` action.
+
+```java
+Table table = ...
+Actions.forTable(table)
+    .removeOrphanFiles()
+    .execute();
+```
+
+See the [RemoveOrphanFilesAction Javadoc](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/actions/RemoveOrphanFilesAction.html) to see more configuration options.
+
+This action may take a long time to finish if you have lots of files in data and metadata directories. It is recommended to execute this periodically, but you may not need to execute this often.
+
+{{< hint info >}}
+It is dangerous to remove orphan files with a retention interval shorter than the time expected for any write to complete because it
+might corrupt the table if in-progress files are considered orphaned and are deleted. The default interval is 3 days.
+{{< /hint >}}
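+
+A sketch of setting the retention threshold on the action, assuming the action's `olderThan` option (check the `RemoveOrphanFilesAction` Javadoc linked above for the exact configuration methods):
+
+```java
+Actions.forTable(table)
+    .removeOrphanFiles()
+    .olderThan(System.currentTimeMillis() - (1000L * 60 * 60 * 24 * 3)) // 3 days
+    .execute();
+```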
+    
+{{< hint info >}}
+Iceberg uses the string representations of paths when determining which files need to be removed. On some file systems,
+the path can change over time, but it still represents the same file. For example, if you change authorities for an HDFS cluster, 
+none of the old path URLs used during creation will match those that appear in a current listing. *This will lead to data loss when
+RemoveOrphanFiles is run*. Please be sure the entries in your MetadataTables match those listed by the Hadoop
+FileSystem API to avoid unintentional deletion. 
+{{< /hint >}}
+
+## Optional Maintenance
+
+Some tables require additional maintenance. For example, streaming queries may produce small data files that should be [compacted into larger files](#compact-data-files). And some tables can benefit from [rewriting manifest files](#rewrite-manifests) to make locating data for queries much faster.
+
+### Compact data files
+
+Iceberg tracks each data file in a table. More data files lead to more metadata stored in manifest files, and small data files cause an unnecessary amount of metadata and less efficient queries from file open costs.
+
+Iceberg can compact data files in parallel using Spark with the `rewriteDataFiles` action. This will combine small files into larger files to reduce metadata overhead and runtime file open cost.
+
+```java
+Table table = ...
+Actions.forTable(table).rewriteDataFiles()
+    .filter(Expressions.equal("date", "2020-08-18"))
+    .targetSizeInBytes(500 * 1024 * 1024) // 500 MB
+    .execute();
+```
+
+The `files` metadata table is useful for inspecting data file sizes and determining when to compact partitions.
+
+See the [`RewriteDataFilesAction` Javadoc](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/actions/RewriteDataFilesAction.html) to see more configuration options.
+
+### Rewrite manifests
+
+Iceberg uses metadata in its manifest list and manifest files to speed up query planning and to prune unnecessary data files. The metadata tree functions as an index over a table's data.
+
+Manifests in the metadata tree are automatically compacted in the order they are added, which makes queries faster when the write pattern aligns with read filters. For example, writing hourly-partitioned data as it arrives is aligned with time range query filters.
+
+When a table's write pattern doesn't align with the query pattern, metadata can be rewritten to re-group data files into manifests using `rewriteManifests` or the `rewriteManifests` action (for parallel rewrites using Spark).
+
+This example rewrites small manifests and groups data files by the first partition field.
+
+```java
+Table table = ...
+table.rewriteManifests()
+    .rewriteIf(file -> file.length() < 10 * 1024 * 1024) // 10 MB
+    .clusterBy(file -> file.partition().get(0, Integer.class))
+    .commit();
+```
+
+See the [`RewriteManifestsAction` Javadoc](../javadoc/{{% icebergVersion %}}/org/apache/iceberg/actions/RewriteManifestsAction.html) to see more configuration options.
diff --git a/docs/content/docs/tables/partitioning.md b/docs/content/docs/tables/partitioning.md
new file mode 100644
index 0000000..a5e111d
--- /dev/null
+++ b/docs/content/docs/tables/partitioning.md
@@ -0,0 +1,97 @@
+---
+url: partitioning
+aliases:
+    - "tables/partitioning"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Partitioning
+
+## What is partitioning?
+
+Partitioning is a way to make queries faster by grouping similar rows together when writing.
+
+For example, queries for log entries from a `logs` table would usually include a time range, like this query for logs between 10 and 12 AM:
+
+```sql
+SELECT level, message FROM logs
+WHERE event_time BETWEEN '2018-12-01 10:00:00' AND '2018-12-01 12:00:00'
+```
+
+Configuring the `logs` table to partition by the date of `event_time` will group log events into files with the same event date. Iceberg keeps track of that date and will use it to skip files for other dates that don't have useful data.
+
+Iceberg can partition timestamps by year, month, day, and hour granularity. It can also use a categorical column, like `level` in this logs example, to store rows together and speed up queries.
+
+
+## What does Iceberg do differently?
+
+Other table formats like Hive support partitioning, but Iceberg supports *hidden partitioning*.
+
+* Iceberg handles the tedious and error-prone task of producing partition values for rows in a table.
+* Iceberg avoids reading unnecessary partitions automatically. Consumers don't need to know how the table is partitioned or add extra filters to their queries.
+* Iceberg partition layouts can evolve as needed.
+
+### Partitioning in Hive
+
+To demonstrate the difference, consider how Hive would handle a `logs` table.
+
+In Hive, partitions are explicit and appear as a column, so the `logs` table would have a column called `event_date`. When writing, an insert needs to supply the data for the `event_date` column:
+
+```sql
+INSERT INTO logs PARTITION (event_date)
+  SELECT level, message, event_time, format_time(event_time, 'YYYY-MM-dd')
+  FROM unstructured_log_source
+```
+
+Similarly, queries that search through the `logs` table must have an `event_date` filter in addition to an `event_time` filter.
+
+```sql
+SELECT level, count(1) as count FROM logs
+WHERE event_time BETWEEN '2018-12-01 10:00:00' AND '2018-12-01 12:00:00'
+  AND event_date = '2018-12-01'
+```
+
+If the `event_date` filter were missing, Hive would scan through every file in the table because it doesn't know that the `event_time` column is related to the `event_date` column.
+
+### Problems with Hive partitioning
+
+Hive must be given partition values. In the logs example, it doesn't know the relationship between `event_time` and `event_date`.
+
+This leads to several problems:
+
+* Hive can't validate partition values -- it is up to the writer to produce the correct value
+    - Using the wrong format, `2018-12-01` instead of `20181201`, produces silently incorrect results, not query failures
+    - Using the wrong source column, like `processing_time`, or time zone also causes incorrect results, not failures
+* It is up to the user to write queries correctly
+    - Using the wrong format also leads to silently incorrect results
+    - Users that don't understand a table's physical layout get needlessly slow queries -- Hive can't translate filters automatically
+* Working queries are tied to the table's partitioning scheme, so partitioning configuration cannot be changed without breaking queries
+
+### Iceberg's hidden partitioning
+
+Iceberg produces partition values by taking a column value and optionally transforming it. Iceberg is responsible for converting `event_time` into `event_date`, and keeps track of the relationship.
+
+Table partitioning is configured using these relationships. The `logs` table would be partitioned by `date(event_time)` and `level`.
+
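+For illustration, this is roughly how such a spec looks in Iceberg's Java API (a minimal sketch; the schema and field IDs here are hypothetical):
+
+```java
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.types.Types;
+
+Schema schema = new Schema(
+    Types.NestedField.required(1, "level", Types.StringType.get()),
+    Types.NestedField.required(2, "event_time", Types.TimestampType.withZone()),
+    Types.NestedField.required(3, "message", Types.StringType.get()));
+
+PartitionSpec spec = PartitionSpec.builderFor(schema)
+    .day("event_time")   // hidden transform that derives the event date
+    .identity("level")
+    .build();
+```
+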
+Because Iceberg doesn't require user-maintained partition columns, it can hide partitioning. Partition values are produced correctly every time and always used to speed up queries, when possible. Producers and consumers wouldn't even see `event_date`.
+
+Most importantly, queries no longer depend on a table's physical layout. With a separation between physical and logical, Iceberg tables can evolve partition schemes over time as data volume changes. Misconfigured tables can be fixed without an expensive migration.
+
+For details about all the supported hidden partition transformations, see the [Partition Transforms](../spec/#partition-transforms) section.
+
+For details about updating a table's partition spec, see the [partition evolution](../evolution/#partition-evolution) section.
diff --git a/docs/content/docs/tables/performance.md b/docs/content/docs/tables/performance.md
new file mode 100644
index 0000000..6a8381e
--- /dev/null
+++ b/docs/content/docs/tables/performance.md
@@ -0,0 +1,57 @@
+---
+url: performance
+aliases:
+    - "tables/performance"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Performance
+
+* Iceberg is designed for huge tables and is used in production where a *single table* can contain tens of petabytes of data.
+* Even multi-petabyte tables can be read from a single node, without needing a distributed SQL engine to sift through table metadata.
+
+## Scan planning
+
+Scan planning is the process of finding the files in a table that are needed for a query.
+
+Planning in an Iceberg table fits on a single node because Iceberg's metadata can be used to prune *metadata* files that aren't needed, in addition to filtering *data* files that don't contain matching data.
+
+Fast scan planning from a single node enables:
+
+* Lower latency SQL queries -- by eliminating a distributed scan to plan a distributed scan
+* Access from any client -- stand-alone processes can read data directly from Iceberg tables
+
+### Metadata filtering
+
+Iceberg uses two levels of metadata to track the files in a snapshot.
+
+* **Manifest files** store a list of data files, along with each data file's partition data and column-level stats
+* A **manifest list** stores the snapshot's list of manifests, along with the range of values for each partition field
+
+For fast scan planning, Iceberg first filters manifests using the partition value ranges in the manifest list. Then, it reads each manifest to get data files. With this scheme, the manifest list acts as an index over the manifest files, making it possible to plan without reading all manifests.
+
+In addition to partition value ranges, a manifest list also stores the number of files added or deleted in a manifest to speed up operations like snapshot expiration.
+
+### Data filtering
+
+Manifest files include a tuple of partition data and column-level stats for each data file.
+
+During planning, query predicates are automatically converted to predicates on the partition data and applied first to filter data files. Next, column-level value counts, null counts, lower bounds, and upper bounds are used to eliminate files that cannot match the query predicate.
+
+By using upper and lower bounds to filter data files at planning time, Iceberg uses clustered data to eliminate splits without running tasks. In some cases, this is a [10x performance improvement](https://conferences.oreilly.com/strata/strata-ny-2018/cdn.oreillystatic.com/en/assets/1/event/278/Introducing%20Iceberg_%20Tables%20designed%20for%20object%20stores%20Presentation.pdf).
diff --git a/docs/content/docs/tables/reliability.md b/docs/content/docs/tables/reliability.md
new file mode 100644
index 0000000..f51eb54
--- /dev/null
+++ b/docs/content/docs/tables/reliability.md
@@ -0,0 +1,70 @@
+---
+url: reliability
+aliases:
+    - "tables/reliability"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Reliability
+
+Iceberg was designed to solve correctness problems that affect Hive tables running in S3.
+
+Hive tables track data files using both a central metastore for partitions and a file system for individual files. This makes atomic changes to a table's contents impossible, and eventually consistent stores like S3 may return incorrect results due to the use of listing files to reconstruct the state of a table. It also requires job planning to make many slow listing calls: O(n) with the number of partitions.
+
+Iceberg tracks the complete list of data files in each [snapshot](../terms#snapshot) using a persistent tree structure. Every write or delete produces a new snapshot that reuses as much of the previous snapshot's metadata tree as possible to avoid high write volumes.
+
+Valid snapshots in an Iceberg table are stored in the table metadata file, along with a reference to the current snapshot. Commits replace the path of the current table metadata file using an atomic operation. This ensures that all updates to table data and metadata are atomic, and is the basis for [serializable isolation](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable).
+
+This results in improved reliability guarantees:
+
+* **Serializable isolation**: All table changes occur in a linear history of atomic table updates
+* **Reliable reads**: Readers always use a consistent snapshot of the table without holding a lock
+* **Version history and rollback**: Table snapshots are kept as history and tables can roll back if a job produces bad data
+* **Safe file-level operations**. By supporting atomic changes, Iceberg enables new use cases, like safely compacting small files and safely appending late data to tables
+
+This design also has performance benefits:
+
+* **O(1) RPCs to plan**: Instead of listing O(n) directories in a table to plan a job, reading a snapshot requires O(1) RPC calls
+* **Distributed planning**: File pruning and predicate push-down is distributed to jobs, removing the metastore as a bottleneck
+* **Finer granularity partitioning**: Distributed planning and O(1) RPC calls remove the current barriers to finer-grained partitioning
+
+
+## Concurrent write operations
+
+Iceberg supports multiple concurrent writes using optimistic concurrency.
+
+Each writer assumes that no other writers are operating and writes out new table metadata for an operation. Then, the writer attempts to commit by atomically swapping the new table metadata file for the existing metadata file.
+
+If the atomic swap fails because another writer has committed, the failed writer retries by writing a new metadata tree based on the new current table state.
+
+### Cost of retries
+
+Writers avoid expensive retry operations by structuring changes so that work can be reused across retries.
+
+For example, appends usually create a new manifest file for the appended data files, which can be added to the table without rewriting the manifest on every attempt.
+
+### Retry validation
+
+Commits are structured as assumptions and actions. After a conflict, a writer checks that the assumptions are met by the current table state. If the assumptions are met, then it is safe to re-apply the actions and commit.
+
+For example, a compaction might rewrite `file_a.avro` and `file_b.avro` as `merged.parquet`. This is safe to commit as long as the table still contains both `file_a.avro` and `file_b.avro`. If either file was deleted by a conflicting commit, then the operation must fail. Otherwise, it is safe to remove the source files and add the merged file.
+
+
+## Compatibility
+
+By avoiding file listing and rename operations, Iceberg tables are compatible with any object store. No consistent listing is required.
diff --git a/docs/content/docs/tables/schemas.md b/docs/content/docs/tables/schemas.md
new file mode 100644
index 0000000..4006dc2
--- /dev/null
+++ b/docs/content/docs/tables/schemas.md
@@ -0,0 +1,46 @@
+---
+url: schemas
+aliases:
+    - "tables/schemas"
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+
+# Schemas
+
+Iceberg tables support the following types:
+
+| Type               | Description                                                              | Notes                                            |
+|--------------------|--------------------------------------------------------------------------|--------------------------------------------------|
+| **`boolean`**      | True or false                                                            |                                                  |
+| **`int`**          | 32-bit signed integers                                                   | Can promote to `long`                            |
+| **`long`**         | 64-bit signed integers                                                   |                                                  |
+| **`float`**        | [32-bit IEEE 754](https://en.wikipedia.org/wiki/IEEE_754) floating point | Can promote to `double`                          |
+| **`double`**       | [64-bit IEEE 754](https://en.wikipedia.org/wiki/IEEE_754) floating point |                                                  |
+| **`decimal(P,S)`** | Fixed-point decimal; precision P, scale S                                | Scale is fixed and precision must be 38 or less  |
+| **`date`**         | Calendar date without timezone or time                                   |                                                  |
+| **`time`**         | Time of day, without date or timezone                                    | Stored as microseconds                           |
+| **`timestamp`**    | Timestamp without timezone                                               | Stored as microseconds                           |
+| **`timestamptz`**  | Timestamp with timezone                                                  | Stored as microseconds                           |
+| **`string`**       | Arbitrary-length character sequences                                     | Encoded with UTF-8                               |
+| **`fixed(L)`**     | Fixed-length byte array of length L                                      |                                                  |
+| **`binary`**       | Arbitrary-length byte array                                              |                                                  |
+| **`struct<...>`**  | A record with named fields of any data type                              |                                                  |
+| **`list<E>`**      | A list with elements of any data type                                    |                                                  |
+| **`map<K, V>`**    | A map with keys and values of any data type                              |                                                  |
+
+Iceberg tracks each field in a table schema using an ID that is never reused in a table. See [correctness guarantees](../evolution#correctness) for more information.
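+
+As a sketch, a schema combining a few of these types can be built with the Java API (the field names and IDs are hypothetical):
+
+```java
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.types.Types;
+
+Schema schema = new Schema(
+    Types.NestedField.required(1, "id", Types.LongType.get()),
+    Types.NestedField.optional(2, "tags", Types.ListType.ofOptional(3, Types.StringType.get())),
+    Types.NestedField.optional(4, "point", Types.StructType.of(
+        Types.NestedField.required(5, "x", Types.DoubleType.get()),
+        Types.NestedField.required(6, "y", Types.DoubleType.get()))));
+```
+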
diff --git a/docs/content/docs/trino/_index.md b/docs/content/docs/trino/_index.md
new file mode 100644
index 0000000..4079bfa
--- /dev/null
+++ b/docs/content/docs/trino/_index.md
@@ -0,0 +1,23 @@
+---
+title: "Trino"
+bookIconImage: ../img/trino-logo.png
+bookFlatSection: true
+weight: 410
+bookExternalUrlNewWindow: https://trino.io/docs/current/connector/iceberg.html
+---
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one or more
+ - contributor license agreements.  See the NOTICE file distributed with
+ - this work for additional information regarding copyright ownership.
+ - The ASF licenses this file to You under the Apache License, Version 2.0
+ - (the "License"); you may not use this file except in compliance with
+ - the License.  You may obtain a copy of the License at
+ -
+ -   http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
\ No newline at end of file
diff --git a/docs/layouts/partials/docs/brand.html b/docs/layouts/partials/docs/brand.html
new file mode 100644
index 0000000..52af2a8
--- /dev/null
+++ b/docs/layouts/partials/docs/brand.html
@@ -0,0 +1,11 @@
+<h2 class="book-brand">
+  <a class="flex align-center" href="{{ cond (not .Site.Home.File) .Sites.First.Home.RelPermalink .Site.Home.RelPermalink }}../../">
+    {{- with .Site.Params.BookLogo -}}
+    <img src="{{ . | relURL }}" alt="Logo" />
+    {{- end -}}
+    <span>{{ .Site.Title }}</span>
+  </a>
+  <a href="{{ .Site.BaseURL }}/../../releases">
+    <img id="version-shield" src="https://img.shields.io/badge/version-{{ if eq $.Site.Params.versions.iceberg `latest` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else if eq $.Site.Params.versions.iceberg `main` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else if eq $.Site.Params.versions.iceberg `` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else }}{{ $.Site.Params.versions.iceberg }}{{ end }}-blue" alt="" />
+  </a>
+</h2>
diff --git a/docs/layouts/partials/docs/header.html b/docs/layouts/partials/docs/header.html
new file mode 100644
index 0000000..56ca903
--- /dev/null
+++ b/docs/layouts/partials/docs/header.html
@@ -0,0 +1,14 @@
+<div class="flex align-center justify-between">
+  <link rel="stylesheet" href="{{ "fontawesome/css/font-awesome.min.css" | relURL }}">
+  <label for="menu-control">
+    <img src="{{ "svg/menu.svg" | relURL }}" class="book-icon" alt="Menu" />
+  </label>
+
+  <strong>{{ partial "docs/title" . }}</strong>
+
+  <label for="toc-control">
+    {{ if default true (default .Site.Params.BookToC .Params.BookToC) }}
+    <img src="{{ "svg/toc.svg" | relURL }}" class="book-icon" alt="Table of Contents" />
+    {{ end }}
+  </label>
+</div>
diff --git a/docs/layouts/partials/docs/menu-filetree.html b/docs/layouts/partials/docs/menu-filetree.html
new file mode 100644
index 0000000..c96ab65
--- /dev/null
+++ b/docs/layouts/partials/docs/menu-filetree.html
@@ -0,0 +1,124 @@
+{{ $bookSection := default "docs" .Site.Params.BookSection  }}
+{{ if eq $bookSection "*" }}
+  {{ $bookSection = "/" }}{{/* Backward compatibility */}}
+{{ end }}
+
+{{ with .Site.GetPage $bookSection }}
+  {{ template "book-section-children" (dict "Section" . "CurrentPage" $) }}
+{{ end }}
+
+{{ define "book-section-children" }}{{/* (dict "Section" .Section "CurrentPage" .CurrentPage) */}}
+  <ul>
+    {{ range (where .Section.Pages "Params.bookhidden" "ne" true) }}
+    {{ if .Params.bookExternalUrl }}
+    <li
+    {{- if and (not .Params.BookFlatSection) (not .Params.BookCollapseSection) }}
+    class="navigation-icon-pad"
+  {{- else if  .Params.BookFlatSection }}
+      {{- else if .Params.BookFlatSection }} class="book-section-flat"
+      {{- else if .Params.BookCollapseSection }} class="book-section-collapsed"
+      {{- else if  (.Page.Params.BookExternalUrl) }} class=""
+      {{- else }} class="navigation-icon-pad"
+      {{ end -}}
+    >
+        <a href="{{ .Params.bookExternalUrl }}">
+          {{ template "book-nav-item-logo-image" (dict "Page" .) }}
+          {{.Title}}
+        </a>
+      </li>
+      {{ else if .Params.bookExternalUrlNewWindow }}
+      <li
+      {{- if and (not .Params.BookFlatSection) (not .Params.BookCollapseSection) }}
+      class="navigation-icon-pad"
+    {{- else if  .Params.BookFlatSection }}
+        {{- else if .Params.BookFlatSection }} class="book-section-flat"
+        {{- else if .Params.BookCollapseSection }} class="book-section-collapsed"
+        {{- else if  (.Page.Params.BookExternalUrl) }} class=""
+        {{- else }} class="navigation-icon-pad"
+        {{ end -}}
+      >
+          <a href="{{ .Params.bookExternalUrlNewWindow }}" target="_blank">
+            {{ template "book-nav-item-logo-image" (dict "Page" .) }}
+            {{.Title}}
+          </a>
+        </li>
+      {{ else if .Params.bookUrlFromBaseURL }}
+      <li
+        {{- if and (not .Params.BookFlatSection) (not .Params.BookCollapseSection) }}
+          class="navigation-icon-pad"
+        {{- else if  .Params.BookFlatSection }}
+          class="book-section-flat navigation-icon-pad"
+        {{- else if  .Params.BookCollapseSection }}
+          class="book-section-collapsed navigation-icon-pad"
+        {{ end -}}
+      >
+          <a href="{{ .Page.Site.BaseURL }}/..{{ .Params.bookUrlFromBaseURL }}">
+            {{ template "book-nav-item-logo-image" (dict "Page" .) }}
+            {{.Title}}
+          </a>
+        </li>
+      {{ else if .IsSection }}
+        <li
+          {{- if and (not .Page.Params.bookIconImage) (not .Page.Params.bookIconFa) }} class="book-section-flat navigation-icon-pad"
+          {{- else if .Params.BookFlatSection }} class="book-section-flats"
+          {{- else if .Params.BookCollapseSection }} class="book-section-collapsed"
+          {{- else }} class="navigation-icon-pad"
+          {{ end -}}
+        >
+          {{ template "book-page-link" (dict "Page" . "CurrentPage" $.CurrentPage) }}
+          {{ template "book-section-children" (dict "Section" . "CurrentPage" $.CurrentPage) }}
+        </li>
+      {{ else if and .IsPage .Content }}
+      <li
+      {{- if or .Parent.Params.bookIconImage .Parent.Params.bookIconFa }} class="navigation-icon-pad"
+      {{ end -}}
+      >
+          {{ template "book-page-link" (dict "Page" . "CurrentPage" $.CurrentPage) }}
+        </li>
+      {{ end }}
+    {{ end }}
+  </ul>
+{{ end }}
+
+{{ define "book-page-link" }}{{/* (dict "Page" .Page "CurrentPage" .CurrentPage) */}}
+  {{ $current := eq .CurrentPage .Page }}
+  {{ $ancestor := .Page.IsAncestor .CurrentPage }}
+
+  {{ if .Page.Params.bookCollapseSection }}
+    <input type="checkbox" id="section-{{ md5 .Page }}" class="toggle" {{ if or $current $ancestor }}checked{{ end }} />
+    <label for="section-{{ md5 .Page }}" class="flex justify-between">
+      <a {{ if .Page.Content }}href="{{ .Page.Permalink }}"{{ else }}role="button"{{ end }} class="{{ if $current }}active{{ end }}">
+        {{ template "book-nav-item-logo-image" (dict "Page" .Page) }}
+        {{- partial "docs/title" .Page -}}
+      </a>
+    </label>
+  {{ else if .Page.Content }}
+    <a href="{{ .Page.Permalink }}" class="{{ if $current }} active{{ end }}">
+      {{ template "book-nav-item-logo-image" (dict "Page" .Page) }}
+      {{- partial "docs/title" .Page -}}
+    </a>
+  {{ else }}
+    <span>
+      {{ template "book-nav-item-logo-image" (dict "Page" .Page) }}
+      {{- partial "docs/title" .Page -}}
+    </span>
+  {{ end }}
+{{ end }}
+
+{{ define "book-nav-item-logo-image" }}{{/* (dict "Page" .Page) */}}
+  {{- if .Page.Params.bookIconImage }}
+    <img src="{{ .Page.Site.BaseURL }}/img/{{.Page.Params.bookIconImage}}" class="navigation-icon fa-fw"/>
+  {{- else if .Page.Params.bookIconFa }}
+    <i class="fa {{.Page.Params.bookIconFa}} fa-fw"></i>
+  {{ end -}}
+{{ end }}
+
+{{ define "book-li-classes" }} */}}{{/* (dict "Page" .Page) */}}
+  {{ $hasLogo := false }}
+  {{ if and not .Page.Params.bookIconImage not .Page.Params.bookIconFa }}
+    {{ $hasLogo := true }}
+  {{ else }}
+    {{ $hasLogo := false }}
+  {{ end -}}
+  class="{{ if $hasLogo }} navigation-icon-pad {{ end }}{{ if .Params.BookFlatSection }} book-section-flat {{ end }}"
+{{ end -}}
\ No newline at end of file
diff --git a/docs/layouts/partials/docs/search.html b/docs/layouts/partials/docs/search.html
new file mode 100644
index 0000000..c497ecc
--- /dev/null
+++ b/docs/layouts/partials/docs/search.html
@@ -0,0 +1,13 @@
+{{ if default true .Site.Params.BookSearch }}
+<div class="book-search">
+  <input type="text" id="book-search-input" placeholder="{{ i18n "Search" }}" aria-label="{{ i18n "Search" }}" maxlength="64" data-hotkeys="s/" />
+  <div class="book-search-spinner hidden"></div>
+  <ul id="book-search-results"></ul>
+  <a href="https://github.com/apache/iceberg" target="_blank">
+    <img src="{{ .Site.BaseURL }}/img/GitHub-Mark.png" target="_blank" class="top-external-icon"/>
+  </a>
+  <a href="https://join.slack.com/t/apache-iceberg/shared_invite/zt-tlv0zjz6-jGJEkHfb1~heMCJA3Uycrg" target="_blank">
+    <img src="{{ .Site.BaseURL }}/img/Slack_Mark_Web.png" target="_blank" class="top-external-icon"/>
+  </a>
+</div>
+{{ end }}
diff --git a/docs/layouts/shortcodes/icebergVersion.html b/docs/layouts/shortcodes/icebergVersion.html
new file mode 100644
index 0000000..e5089e8
--- /dev/null
+++ b/docs/layouts/shortcodes/icebergVersion.html
@@ -0,0 +1 @@
+{{ if eq $.Site.Params.versions.iceberg `latest` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else if eq $.Site.Params.versions.iceberg `main` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else if eq $.Site.Params.versions.iceberg `` }}{{ $.Site.Params.latestVersions.iceberg }}{{ else }}{{ $.Site.Params.versions.iceberg }}{{ end }}
\ No newline at end of file
diff --git a/docs/layouts/shortcodes/nessieVersion.html b/docs/layouts/shortcodes/nessieVersion.html
new file mode 100644
index 0000000..557b514
--- /dev/null
+++ b/docs/layouts/shortcodes/nessieVersion.html
@@ -0,0 +1 @@
+{{ .Page.Site.Params.versions.nessie }}
\ No newline at end of file
diff --git a/docs/static/css/bootstrap.css b/docs/static/css/bootstrap.css
new file mode 100644
index 0000000..8352fad
--- /dev/null
+++ b/docs/static/css/bootstrap.css
@@ -0,0 +1,6199 @@
+/*!
+ * Bootstrap v3.2.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+/*! normalize.css v3.0.1 | MIT License | git.io/normalize */
+html {
+  font-family: sans-serif;
+  -webkit-text-size-adjust: 100%;
+      -ms-text-size-adjust: 100%;
+}
+body {
+  margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+nav,
+section,
+summary {
+  display: block;
+}
+audio,
+canvas,
+progress,
+video {
+  display: inline-block;
+  vertical-align: baseline;
+}
+audio:not([controls]) {
+  display: none;
+  height: 0;
+}
+[hidden],
+template {
+  display: none;
+}
+a {
+  background: transparent;
+}
+a:active,
+a:hover {
+  outline: 0;
+}
+abbr[title] {
+  border-bottom: 1px dotted;
+}
+b,
+strong {
+  font-weight: bold;
+}
+dfn {
+  font-style: italic;
+}
+h1 {
+  margin: .67em 0;
+  font-size: 2em;
+}
+mark {
+  color: #000;
+  background: #ff0;
+}
+small {
+  font-size: 80%;
+}
+sub,
+sup {
+  position: relative;
+  font-size: 75%;
+  line-height: 0;
+  vertical-align: baseline;
+}
+sup {
+  top: -.5em;
+}
+sub {
+  bottom: -.25em;
+}
+img {
+  border: 0;
+}
+svg:not(:root) {
+  overflow: hidden;
+}
+figure {
+  margin: 1em 40px;
+}
+hr {
+  height: 0;
+  -webkit-box-sizing: content-box;
+     -moz-box-sizing: content-box;
+          box-sizing: content-box;
+}
+pre {
+  overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: monospace, monospace;
+  font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+  margin: 0;
+  font: inherit;
+  color: inherit;
+}
+button {
+  overflow: visible;
+}
+button,
+select {
+  text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+  -webkit-appearance: button;
+  cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+  cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+  padding: 0;
+  border: 0;
+}
+input {
+  line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+  padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+  height: auto;
+}
+input[type="search"] {
+  -webkit-box-sizing: content-box;
+     -moz-box-sizing: content-box;
+          box-sizing: content-box;
+  -webkit-appearance: textfield;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+  -webkit-appearance: none;
+}
+fieldset {
+  padding: .35em .625em .75em;
+  margin: 0 2px;
+  border: 1px solid #c0c0c0;
+}
+legend {
+  padding: 0;
+  border: 0;
+}
+textarea {
+  overflow: auto;
+}
+optgroup {
+  font-weight: bold;
+}
+table {
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+td,
+th {
+  padding: 0;
+}
+@media print {
+  * {
+    color: #000 !important;
+    text-shadow: none !important;
+    background: transparent !important;
+    -webkit-box-shadow: none !important;
+            box-shadow: none !important;
+  }
+  a,
+  a:visited {
+    text-decoration: underline;
+  }
+  a[href]:after {
+    content: " (" attr(href) ")";
+  }
+  abbr[title]:after {
+    content: " (" attr(title) ")";
+  }
+  a[href^="javascript:"]:after,
+  a[href^="#"]:after {
+    content: "";
+  }
+  pre,
+  blockquote {
+    border: 1px solid #999;
+
+    page-break-inside: avoid;
+  }
+  thead {
+    display: table-header-group;
+  }
+  tr,
+  img {
+    page-break-inside: avoid;
+  }
+  img {
+    max-width: 100% !important;
+  }
+  p,
+  h2,
+  h3 {
+    orphans: 3;
+    widows: 3;
+  }
+  h2,
+  h3 {
+    page-break-after: avoid;
+  }
+  select {
+    background: #fff !important;
+  }
+  .navbar {
+    display: none;
+  }
+  .table td,
+  .table th {
+    background-color: #fff !important;
+  }
+  .btn > .caret,
+  .dropup > .btn > .caret {
+    border-top-color: #000 !important;
+  }
+  .label {
+    border: 1px solid #000;
+  }
+  .table {
+    border-collapse: collapse !important;
+  }
+  .table-bordered th,
+  .table-bordered td {
+    border: 1px solid #ddd !important;
+  }
+}
+@font-face {
+  font-family: 'Glyphicons Halflings';
+
+  src: url('../fonts/glyphicons-halflings-regular.eot');
+  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+}
+.glyphicon {
+  position: relative;
+  top: 1px;
+  display: inline-block;
+  font-family: 'Glyphicons Halflings';
+  font-style: normal;
+  font-weight: normal;
+  line-height: 1;
+
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+.glyphicon-asterisk:before {
+  content: "\2a";
+}
+.glyphicon-plus:before {
+  content: "\2b";
+}
+.glyphicon-euro:before {
+  content: "\20ac";
+}
+.glyphicon-minus:before {
+  content: "\2212";
+}
+.glyphicon-cloud:before {
+  content: "\2601";
+}
+.glyphicon-envelope:before {
+  content: "\2709";
+}
+.glyphicon-pencil:before {
+  content: "\270f";
+}
+.glyphicon-glass:before {
+  content: "\e001";
+}
+.glyphicon-music:before {
+  content: "\e002";
+}
+.glyphicon-search:before {
+  content: "\e003";
+}
+.glyphicon-heart:before {
+  content: "\e005";
+}
+.glyphicon-star:before {
+  content: "\e006";
+}
+.glyphicon-star-empty:before {
+  content: "\e007";
+}
+.glyphicon-user:before {
+  content: "\e008";
+}
+.glyphicon-film:before {
+  content: "\e009";
+}
+.glyphicon-th-large:before {
+  content: "\e010";
+}
+.glyphicon-th:before {
+  content: "\e011";
+}
+.glyphicon-th-list:before {
+  content: "\e012";
+}
+.glyphicon-ok:before {
+  content: "\e013";
+}
+.glyphicon-remove:before {
+  content: "\e014";
+}
+.glyphicon-zoom-in:before {
+  content: "\e015";
+}
+.glyphicon-zoom-out:before {
+  content: "\e016";
+}
+.glyphicon-off:before {
+  content: "\e017";
+}
+.glyphicon-signal:before {
+  content: "\e018";
+}
+.glyphicon-cog:before {
+  content: "\e019";
+}
+.glyphicon-trash:before {
+  content: "\e020";
+}
+.glyphicon-home:before {
+  content: "\e021";
+}
+.glyphicon-file:before {
+  content: "\e022";
+}
+.glyphicon-time:before {
+  content: "\e023";
+}
+.glyphicon-road:before {
+  content: "\e024";
+}
+.glyphicon-download-alt:before {
+  content: "\e025";
+}
+.glyphicon-download:before {
+  content: "\e026";
+}
+.glyphicon-upload:before {
+  content: "\e027";
+}
+.glyphicon-inbox:before {
+  content: "\e028";
+}
+.glyphicon-play-circle:before {
+  content: "\e029";
+}
+.glyphicon-repeat:before {
+  content: "\e030";
+}
+.glyphicon-refresh:before {
+  content: "\e031";
+}
+.glyphicon-list-alt:before {
+  content: "\e032";
+}
+.glyphicon-lock:before {
+  content: "\e033";
+}
+.glyphicon-flag:before {
+  content: "\e034";
+}
+.glyphicon-headphones:before {
+  content: "\e035";
+}
+.glyphicon-volume-off:before {
+  content: "\e036";
+}
+.glyphicon-volume-down:before {
+  content: "\e037";
+}
+.glyphicon-volume-up:before {
+  content: "\e038";
+}
+.glyphicon-qrcode:before {
+  content: "\e039";
+}
+.glyphicon-barcode:before {
+  content: "\e040";
+}
+.glyphicon-tag:before {
+  content: "\e041";
+}
+.glyphicon-tags:before {
+  content: "\e042";
+}
+.glyphicon-book:before {
+  content: "\e043";
+}
+.glyphicon-bookmark:before {
+  content: "\e044";
+}
+.glyphicon-print:before {
+  content: "\e045";
+}
+.glyphicon-camera:before {
+  content: "\e046";
+}
+.glyphicon-font:before {
+  content: "\e047";
+}
+.glyphicon-bold:before {
+  content: "\e048";
+}
+.glyphicon-italic:before {
+  content: "\e049";
+}
+.glyphicon-text-height:before {
+  content: "\e050";
+}
+.glyphicon-text-width:before {
+  content: "\e051";
+}
+.glyphicon-align-left:before {
+  content: "\e052";
+}
+.glyphicon-align-center:before {
+  content: "\e053";
+}
+.glyphicon-align-right:before {
+  content: "\e054";
+}
+.glyphicon-align-justify:before {
+  content: "\e055";
+}
+.glyphicon-list:before {
+  content: "\e056";
+}
+.glyphicon-indent-left:before {
+  content: "\e057";
+}
+.glyphicon-indent-right:before {
+  content: "\e058";
+}
+.glyphicon-facetime-video:before {
+  content: "\e059";
+}
+.glyphicon-picture:before {
+  content: "\e060";
+}
+.glyphicon-map-marker:before {
+  content: "\e062";
+}
+.glyphicon-adjust:before {
+  content: "\e063";
+}
+.glyphicon-tint:before {
+  content: "\e064";
+}
+.glyphicon-edit:before {
+  content: "\e065";
+}
+.glyphicon-share:before {
+  content: "\e066";
+}
+.glyphicon-check:before {
+  content: "\e067";
+}
+.glyphicon-move:before {
+  content: "\e068";
+}
+.glyphicon-step-backward:before {
+  content: "\e069";
+}
+.glyphicon-fast-backward:before {
+  content: "\e070";
+}
+.glyphicon-backward:before {
+  content: "\e071";
+}
+.glyphicon-play:before {
+  content: "\e072";
+}
+.glyphicon-pause:before {
+  content: "\e073";
+}
+.glyphicon-stop:before {
+  content: "\e074";
+}
+.glyphicon-forward:before {
+  content: "\e075";
+}
+.glyphicon-fast-forward:before {
+  content: "\e076";
+}
+.glyphicon-step-forward:before {
+  content: "\e077";
+}
+.glyphicon-eject:before {
+  content: "\e078";
+}
+.glyphicon-chevron-left:before {
+  content: "\e079";
+}
+.glyphicon-chevron-right:before {
+  content: "\e080";
+}
+.glyphicon-plus-sign:before {
+  content: "\e081";
+}
+.glyphicon-minus-sign:before {
+  content: "\e082";
+}
+.glyphicon-remove-sign:before {
+  content: "\e083";
+}
+.glyphicon-ok-sign:before {
+  content: "\e084";
+}
+.glyphicon-question-sign:before {
+  content: "\e085";
+}
+.glyphicon-info-sign:before {
+  content: "\e086";
+}
+.glyphicon-screenshot:before {
+  content: "\e087";
+}
+.glyphicon-remove-circle:before {
+  content: "\e088";
+}
+.glyphicon-ok-circle:before {
+  content: "\e089";
+}
+.glyphicon-ban-circle:before {
+  content: "\e090";
+}
+.glyphicon-arrow-left:before {
+  content: "\e091";
+}
+.glyphicon-arrow-right:before {
+  content: "\e092";
+}
+.glyphicon-arrow-up:before {
+  content: "\e093";
+}
+.glyphicon-arrow-down:before {
+  content: "\e094";
+}
+.glyphicon-share-alt:before {
+  content: "\e095";
+}
+.glyphicon-resize-full:before {
+  content: "\e096";
+}
+.glyphicon-resize-small:before {
+  content: "\e097";
+}
+.glyphicon-exclamation-sign:before {
+  content: "\e101";
+}
+.glyphicon-gift:before {
+  content: "\e102";
+}
+.glyphicon-leaf:before {
+  content: "\e103";
+}
+.glyphicon-fire:before {
+  content: "\e104";
+}
+.glyphicon-eye-open:before {
+  content: "\e105";
+}
+.glyphicon-eye-close:before {
+  content: "\e106";
+}
+.glyphicon-warning-sign:before {
+  content: "\e107";
+}
+.glyphicon-plane:before {
+  content: "\e108";
+}
+.glyphicon-calendar:before {
+  content: "\e109";
+}
+.glyphicon-random:before {
+  content: "\e110";
+}
+.glyphicon-comment:before {
+  content: "\e111";
+}
+.glyphicon-magnet:before {
+  content: "\e112";
+}
+.glyphicon-chevron-up:before {
+  content: "\e113";
+}
+.glyphicon-chevron-down:before {
+  content: "\e114";
+}
+.glyphicon-retweet:before {
+  content: "\e115";
+}
+.glyphicon-shopping-cart:before {
+  content: "\e116";
+}
+.glyphicon-folder-close:before {
+  content: "\e117";
+}
+.glyphicon-folder-open:before {
+  content: "\e118";
+}
+.glyphicon-resize-vertical:before {
+  content: "\e119";
+}
+.glyphicon-resize-horizontal:before {
+  content: "\e120";
+}
+.glyphicon-hdd:before {
+  content: "\e121";
+}
+.glyphicon-bullhorn:before {
+  content: "\e122";
+}
+.glyphicon-bell:before {
+  content: "\e123";
+}
+.glyphicon-certificate:before {
+  content: "\e124";
+}
+.glyphicon-thumbs-up:before {
+  content: "\e125";
+}
+.glyphicon-thumbs-down:before {
+  content: "\e126";
+}
+.glyphicon-hand-right:before {
+  content: "\e127";
+}
+.glyphicon-hand-left:before {
+  content: "\e128";
+}
+.glyphicon-hand-up:before {
+  content: "\e129";
+}
+.glyphicon-hand-down:before {
+  content: "\e130";
+}
+.glyphicon-circle-arrow-right:before {
+  content: "\e131";
+}
+.glyphicon-circle-arrow-left:before {
+  content: "\e132";
+}
+.glyphicon-circle-arrow-up:before {
+  content: "\e133";
+}
+.glyphicon-circle-arrow-down:before {
+  content: "\e134";
+}
+.glyphicon-globe:before {
+  content: "\e135";
+}
+.glyphicon-wrench:before {
+  content: "\e136";
+}
+.glyphicon-tasks:before {
+  content: "\e137";
+}
+.glyphicon-filter:before {
+  content: "\e138";
+}
+.glyphicon-briefcase:before {
+  content: "\e139";
+}
+.glyphicon-fullscreen:before {
+  content: "\e140";
+}
+.glyphicon-dashboard:before {
+  content: "\e141";
+}
+.glyphicon-paperclip:before {
+  content: "\e142";
+}
+.glyphicon-heart-empty:before {
+  content: "\e143";
+}
+.glyphicon-link:before {
+  content: "\e144";
+}
+.glyphicon-phone:before {
+  content: "\e145";
+}
+.glyphicon-pushpin:before {
+  content: "\e146";
+}
+.glyphicon-usd:before {
+  content: "\e148";
+}
+.glyphicon-gbp:before {
+  content: "\e149";
+}
+.glyphicon-sort:before {
+  content: "\e150";
+}
+.glyphicon-sort-by-alphabet:before {
+  content: "\e151";
+}
+.glyphicon-sort-by-alphabet-alt:before {
+  content: "\e152";
+}
+.glyphicon-sort-by-order:before {
+  content: "\e153";
+}
+.glyphicon-sort-by-order-alt:before {
+  content: "\e154";
+}
+.glyphicon-sort-by-attributes:before {
+  content: "\e155";
+}
+.glyphicon-sort-by-attributes-alt:before {
+  content: "\e156";
+}
+.glyphicon-unchecked:before {
+  content: "\e157";
+}
+.glyphicon-expand:before {
+  content: "\e158";
+}
+.glyphicon-collapse-down:before {
+  content: "\e159";
+}
+.glyphicon-collapse-up:before {
+  content: "\e160";
+}
+.glyphicon-log-in:before {
+  content: "\e161";
+}
+.glyphicon-flash:before {
+  content: "\e162";
+}
+.glyphicon-log-out:before {
+  content: "\e163";
+}
+.glyphicon-new-window:before {
+  content: "\e164";
+}
+.glyphicon-record:before {
+  content: "\e165";
+}
+.glyphicon-save:before {
+  content: "\e166";
+}
+.glyphicon-open:before {
+  content: "\e167";
+}
+.glyphicon-saved:before {
+  content: "\e168";
+}
+.glyphicon-import:before {
+  content: "\e169";
+}
+.glyphicon-export:before {
+  content: "\e170";
+}
+.glyphicon-send:before {
+  content: "\e171";
+}
+.glyphicon-floppy-disk:before {
+  content: "\e172";
+}
+.glyphicon-floppy-saved:before {
+  content: "\e173";
+}
+.glyphicon-floppy-remove:before {
+  content: "\e174";
+}
+.glyphicon-floppy-save:before {
+  content: "\e175";
+}
+.glyphicon-floppy-open:before {
+  content: "\e176";
+}
+.glyphicon-credit-card:before {
+  content: "\e177";
+}
+.glyphicon-transfer:before {
+  content: "\e178";
+}
+.glyphicon-cutlery:before {
+  content: "\e179";
+}
+.glyphicon-header:before {
+  content: "\e180";
+}
+.glyphicon-compressed:before {
+  content: "\e181";
+}
+.glyphicon-earphone:before {
+  content: "\e182";
+}
+.glyphicon-phone-alt:before {
+  content: "\e183";
+}
+.glyphicon-tower:before {
+  content: "\e184";
+}
+.glyphicon-stats:before {
+  content: "\e185";
+}
+.glyphicon-sd-video:before {
+  content: "\e186";
+}
+.glyphicon-hd-video:before {
+  content: "\e187";
+}
+.glyphicon-subtitles:before {
+  content: "\e188";
+}
+.glyphicon-sound-stereo:before {
+  content: "\e189";
+}
+.glyphicon-sound-dolby:before {
+  content: "\e190";
+}
+.glyphicon-sound-5-1:before {
+  content: "\e191";
+}
+.glyphicon-sound-6-1:before {
+  content: "\e192";
+}
+.glyphicon-sound-7-1:before {
+  content: "\e193";
+}
+.glyphicon-copyright-mark:before {
+  content: "\e194";
+}
+.glyphicon-registration-mark:before {
+  content: "\e195";
+}
+.glyphicon-cloud-download:before {
+  content: "\e197";
+}
+.glyphicon-cloud-upload:before {
+  content: "\e198";
+}
+.glyphicon-tree-conifer:before {
+  content: "\e199";
+}
+.glyphicon-tree-deciduous:before {
+  content: "\e200";
+}
+* {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+*:before,
+*:after {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+html {
+  font-size: 10px;
+
+  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+body {
+  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+  font-size: 14px;
+  line-height: 1.42857143;
+  color: #333;
+  background-color: #fff;
+}
+input,
+button,
+select,
+textarea {
+  font-family: inherit;
+  font-size: inherit;
+  line-height: inherit;
+}
+a {
+  color: #428bca;
+  text-decoration: none;
+}
+a:hover,
+a:focus {
+  color: #2a6496;
+  text-decoration: underline;
+}
+a:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+figure {
+  margin: 0;
+}
+img {
+  vertical-align: middle;
+}
+.img-responsive,
+.thumbnail > img,
+.thumbnail a > img,
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+  display: block;
+  width: 100% \9;
+  max-width: 100%;
+  height: auto;
+}
+.img-rounded {
+  border-radius: 6px;
+}
+.img-thumbnail {
+  display: inline-block;
+  width: 100% \9;
+  max-width: 100%;
+  height: auto;
+  padding: 4px;
+  line-height: 1.42857143;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 4px;
+  -webkit-transition: all .2s ease-in-out;
+       -o-transition: all .2s ease-in-out;
+          transition: all .2s ease-in-out;
+}
+.img-circle {
+  border-radius: 50%;
+}
+hr {
+  margin-top: 20px;
+  margin-bottom: 20px;
+  border: 0;
+  border-top: 1px solid #eee;
+}
+.sr-only {
+  position: absolute;
+  width: 1px;
+  height: 1px;
+  padding: 0;
+  margin: -1px;
+  overflow: hidden;
+  clip: rect(0, 0, 0, 0);
+  border: 0;
+}
+.sr-only-focusable:active,
+.sr-only-focusable:focus {
+  position: static;
+  width: auto;
+  height: auto;
+  margin: 0;
+  overflow: visible;
+  clip: auto;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+.h1,
+.h2,
+.h3,
+.h4,
+.h5,
+.h6 {
+  font-family: inherit;
+  font-weight: 500;
+  line-height: 1.1;
+  color: inherit;
+}
+h1 small,
+h2 small,
+h3 small,
+h4 small,
+h5 small,
+h6 small,
+.h1 small,
+.h2 small,
+.h3 small,
+.h4 small,
+.h5 small,
+.h6 small,
+h1 .small,
+h2 .small,
+h3 .small,
+h4 .small,
+h5 .small,
+h6 .small,
+.h1 .small,
+.h2 .small,
+.h3 .small,
+.h4 .small,
+.h5 .small,
+.h6 .small {
+  font-weight: normal;
+  line-height: 1;
+  color: #777;
+}
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+  margin-top: 20px;
+  margin-bottom: 10px;
+}
+h1 small,
+.h1 small,
+h2 small,
+.h2 small,
+h3 small,
+.h3 small,
+h1 .small,
+.h1 .small,
+h2 .small,
+.h2 .small,
+h3 .small,
+.h3 .small {
+  font-size: 65%;
+}
+h4,
+.h4,
+h5,
+.h5,
+h6,
+.h6 {
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+h4 small,
+.h4 small,
+h5 small,
+.h5 small,
+h6 small,
+.h6 small,
+h4 .small,
+.h4 .small,
+h5 .small,
+.h5 .small,
+h6 .small,
+.h6 .small {
+  font-size: 75%;
+}
+h1,
+.h1 {
+  font-size: 36px;
+}
+h2,
+.h2 {
+  font-size: 30px;
+}
+h3,
+.h3 {
+  font-size: 24px;
+}
+h4,
+.h4 {
+  font-size: 18px;
+}
+h5,
+.h5 {
+  font-size: 14px;
+}
+h6,
+.h6 {
+  font-size: 12px;
+}
+p {
+  margin: 0 0 10px;
+}
+.lead {
+  margin-bottom: 20px;
+  font-size: 16px;
+  font-weight: 300;
+  line-height: 1.4;
+}
+@media (min-width: 768px) {
+  .lead {
+    font-size: 21px;
+  }
+}
+small,
+.small {
+  font-size: 85%;
+}
+cite {
+  font-style: normal;
+}
+mark,
+.mark {
+  padding: .2em;
+  background-color: #fcf8e3;
+}
+.text-left {
+  text-align: left;
+}
+.text-right {
+  text-align: right;
+}
+.text-center {
+  text-align: center;
+}
+.text-justify {
+  text-align: justify;
+}
+.text-nowrap {
+  white-space: nowrap;
+}
+.text-lowercase {
+  text-transform: lowercase;
+}
+.text-uppercase {
+  text-transform: uppercase;
+}
+.text-capitalize {
+  text-transform: capitalize;
+}
+.text-muted {
+  color: #777;
+}
+.text-primary {
+  color: #428bca;
+}
+a.text-primary:hover {
+  color: #3071a9;
+}
+.text-success {
+  color: #3c763d;
+}
+a.text-success:hover {
+  color: #2b542c;
+}
+.text-info {
+  color: #31708f;
+}
+a.text-info:hover {
+  color: #245269;
+}
+.text-warning {
+  color: #8a6d3b;
+}
+a.text-warning:hover {
+  color: #66512c;
+}
+.text-danger {
+  color: #a94442;
+}
+a.text-danger:hover {
+  color: #843534;
+}
+.bg-primary {
+  color: #fff;
+  background-color: #428bca;
+}
+a.bg-primary:hover {
+  background-color: #3071a9;
+}
+.bg-success {
+  background-color: #dff0d8;
+}
+a.bg-success:hover {
+  background-color: #c1e2b3;
+}
+.bg-info {
+  background-color: #d9edf7;
+}
+a.bg-info:hover {
+  background-color: #afd9ee;
+}
+.bg-warning {
+  background-color: #fcf8e3;
+}
+a.bg-warning:hover {
+  background-color: #f7ecb5;
+}
+.bg-danger {
+  background-color: #f2dede;
+}
+a.bg-danger:hover {
+  background-color: #e4b9b9;
+}
+.page-header {
+  padding-bottom: 9px;
+  margin: 40px 0 20px;
+  border-bottom: 1px solid #eee;
+}
+ul,
+ol {
+  margin-top: 0;
+  margin-bottom: 10px;
+}
+ul ul,
+ol ul,
+ul ol,
+ol ol {
+  margin-bottom: 0;
+}
+.list-unstyled {
+  padding-left: 0;
+  list-style: none;
+}
+.list-inline {
+  padding-left: 0;
+  margin-left: -5px;
+  list-style: none;
+}
+.list-inline > li {
+  display: inline-block;
+  padding-right: 5px;
+  padding-left: 5px;
+}
+dl {
+  margin-top: 0;
+  margin-bottom: 20px;
+}
+dt,
+dd {
+  line-height: 1.42857143;
+}
+dt {
+  font-weight: bold;
+}
+dd {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .dl-horizontal dt {
+    float: left;
+    width: 160px;
+    overflow: hidden;
+    clear: left;
+    text-align: right;
+    text-overflow: ellipsis;
+    white-space: nowrap;
+  }
+  .dl-horizontal dd {
+    margin-left: 180px;
+  }
+}
+abbr[title],
+abbr[data-original-title] {
+  cursor: help;
+  border-bottom: 1px dotted #777;
+}
+.initialism {
+  font-size: 90%;
+  text-transform: uppercase;
+}
+blockquote {
+  padding: 10px 20px;
+  margin: 0 0 20px;
+  font-size: 17.5px;
+  border-left: 5px solid #eee;
+}
+blockquote p:last-child,
+blockquote ul:last-child,
+blockquote ol:last-child {
+  margin-bottom: 0;
+}
+blockquote footer,
+blockquote small,
+blockquote .small {
+  display: block;
+  font-size: 80%;
+  line-height: 1.42857143;
+  color: #777;
+}
+blockquote footer:before,
+blockquote small:before,
+blockquote .small:before {
+  content: '\2014 \00A0';
+}
+.blockquote-reverse,
+blockquote.pull-right {
+  padding-right: 15px;
+  padding-left: 0;
+  text-align: right;
+  border-right: 5px solid #eee;
+  border-left: 0;
+}
+.blockquote-reverse footer:before,
+blockquote.pull-right footer:before,
+.blockquote-reverse small:before,
+blockquote.pull-right small:before,
+.blockquote-reverse .small:before,
+blockquote.pull-right .small:before {
+  content: '';
+}
+.blockquote-reverse footer:after,
+blockquote.pull-right footer:after,
+.blockquote-reverse small:after,
+blockquote.pull-right small:after,
+.blockquote-reverse .small:after,
+blockquote.pull-right .small:after {
+  content: '\00A0 \2014';
+}
+blockquote:before,
+blockquote:after {
+  content: "";
+}
+address {
+  margin-bottom: 20px;
+  font-style: normal;
+  line-height: 1.42857143;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+}
+code {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #c7254e;
+  background-color: #f9f2f4;
+  border-radius: 4px;
+}
+kbd {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #fff;
+  background-color: #333;
+  border-radius: 3px;
+  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+}
+kbd kbd {
+  padding: 0;
+  font-size: 100%;
+  -webkit-box-shadow: none;
+          box-shadow: none;
+}
+pre {
+  display: block;
+  padding: 9.5px;
+  margin: 0 0 10px;
+  font-size: 13px;
+  line-height: 1.42857143;
+  color: #333;
+  word-break: break-all;
+  word-wrap: break-word;
+  background-color: #f5f5f5;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+}
+pre code {
+  padding: 0;
+  font-size: inherit;
+  color: inherit;
+  white-space: pre-wrap;
+  background-color: transparent;
+  border-radius: 0;
+}
+.pre-scrollable {
+  max-height: 340px;
+  overflow-y: scroll;
+}
+.container {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+@media (min-width: 768px) {
+  .container {
+    width: 750px;
+  }
+}
+@media (min-width: 992px) {
+  .container {
+    width: 970px;
+  }
+}
+@media (min-width: 1200px) {
+  .container {
+    width: 1170px;
+  }
+}
+.container-fluid {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+.row {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+  position: relative;
+  min-height: 1px;
+  padding-right: 15px;
+  padding-left: 15px;
+}
+.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {
+  float: left;
+}
+.col-xs-12 {
+  width: 100%;
+}
+.col-xs-11 {
+  width: 91.66666667%;
+}
+.col-xs-10 {
+  width: 83.33333333%;
+}
+.col-xs-9 {
+  width: 75%;
+}
+.col-xs-8 {
+  width: 66.66666667%;
+}
+.col-xs-7 {
+  width: 58.33333333%;
+}
+.col-xs-6 {
+  width: 50%;
+}
+.col-xs-5 {
+  width: 41.66666667%;
+}
+.col-xs-4 {
+  width: 33.33333333%;
+}
+.col-xs-3 {
+  width: 25%;
+}
+.col-xs-2 {
+  width: 16.66666667%;
+}
+.col-xs-1 {
+  width: 8.33333333%;
+}
+.col-xs-pull-12 {
+  right: 100%;
+}
+.col-xs-pull-11 {
+  right: 91.66666667%;
+}
+.col-xs-pull-10 {
+  right: 83.33333333%;
+}
+.col-xs-pull-9 {
+  right: 75%;
+}
+.col-xs-pull-8 {
+  right: 66.66666667%;
+}
+.col-xs-pull-7 {
+  right: 58.33333333%;
+}
+.col-xs-pull-6 {
+  right: 50%;
+}
+.col-xs-pull-5 {
+  right: 41.66666667%;
+}
+.col-xs-pull-4 {
+  right: 33.33333333%;
+}
+.col-xs-pull-3 {
+  right: 25%;
+}
+.col-xs-pull-2 {
+  right: 16.66666667%;
+}
+.col-xs-pull-1 {
+  right: 8.33333333%;
+}
+.col-xs-pull-0 {
+  right: auto;
+}
+.col-xs-push-12 {
+  left: 100%;
+}
+.col-xs-push-11 {
+  left: 91.66666667%;
+}
+.col-xs-push-10 {
+  left: 83.33333333%;
+}
+.col-xs-push-9 {
+  left: 75%;
+}
+.col-xs-push-8 {
+  left: 66.66666667%;
+}
+.col-xs-push-7 {
+  left: 58.33333333%;
+}
+.col-xs-push-6 {
+  left: 50%;
+}
+.col-xs-push-5 {
+  left: 41.66666667%;
+}
+.col-xs-push-4 {
+  left: 33.33333333%;
+}
+.col-xs-push-3 {
+  left: 25%;
+}
+.col-xs-push-2 {
+  left: 16.66666667%;
+}
+.col-xs-push-1 {
+  left: 8.33333333%;
+}
+.col-xs-push-0 {
+  left: auto;
+}
+.col-xs-offset-12 {
+  margin-left: 100%;
+}
+.col-xs-offset-11 {
+  margin-left: 91.66666667%;
+}
+.col-xs-offset-10 {
+  margin-left: 83.33333333%;
+}
+.col-xs-offset-9 {
+  margin-left: 75%;
+}
+.col-xs-offset-8 {
+  margin-left: 66.66666667%;
+}
+.col-xs-offset-7 {
+  margin-left: 58.33333333%;
+}
+.col-xs-offset-6 {
+  margin-left: 50%;
+}
+.col-xs-offset-5 {
+  margin-left: 41.66666667%;
+}
+.col-xs-offset-4 {
+  margin-left: 33.33333333%;
+}
+.col-xs-offset-3 {
+  margin-left: 25%;
+}
+.col-xs-offset-2 {
+  margin-left: 16.66666667%;
+}
+.col-xs-offset-1 {
+  margin-left: 8.33333333%;
+}
+.col-xs-offset-0 {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {
+    float: left;
+  }
+  .col-sm-12 {
+    width: 100%;
+  }
+  .col-sm-11 {
+    width: 91.66666667%;
+  }
+  .col-sm-10 {
+    width: 83.33333333%;
+  }
+  .col-sm-9 {
+    width: 75%;
+  }
+  .col-sm-8 {
+    width: 66.66666667%;
+  }
+  .col-sm-7 {
+    width: 58.33333333%;
+  }
+  .col-sm-6 {
+    width: 50%;
+  }
+  .col-sm-5 {
+    width: 41.66666667%;
+  }
+  .col-sm-4 {
+    width: 33.33333333%;
+  }
+  .col-sm-3 {
+    width: 25%;
+  }
+  .col-sm-2 {
+    width: 16.66666667%;
+  }
+  .col-sm-1 {
+    width: 8.33333333%;
+  }
+  .col-sm-pull-12 {
+    right: 100%;
+  }
+  .col-sm-pull-11 {
+    right: 91.66666667%;
+  }
+  .col-sm-pull-10 {
+    right: 83.33333333%;
+  }
+  .col-sm-pull-9 {
+    right: 75%;
+  }
+  .col-sm-pull-8 {
+    right: 66.66666667%;
+  }
+  .col-sm-pull-7 {
+    right: 58.33333333%;
+  }
+  .col-sm-pull-6 {
+    right: 50%;
+  }
+  .col-sm-pull-5 {
+    right: 41.66666667%;
+  }
+  .col-sm-pull-4 {
+    right: 33.33333333%;
+  }
+  .col-sm-pull-3 {
+    right: 25%;
+  }
+  .col-sm-pull-2 {
+    right: 16.66666667%;
+  }
+  .col-sm-pull-1 {
+    right: 8.33333333%;
+  }
+  .col-sm-pull-0 {
+    right: auto;
+  }
+  .col-sm-push-12 {
+    left: 100%;
+  }
+  .col-sm-push-11 {
+    left: 91.66666667%;
+  }
+  .col-sm-push-10 {
+    left: 83.33333333%;
+  }
+  .col-sm-push-9 {
+    left: 75%;
+  }
+  .col-sm-push-8 {
+    left: 66.66666667%;
+  }
+  .col-sm-push-7 {
+    left: 58.33333333%;
+  }
+  .col-sm-push-6 {
+    left: 50%;
+  }
+  .col-sm-push-5 {
+    left: 41.66666667%;
+  }
+  .col-sm-push-4 {
+    left: 33.33333333%;
+  }
+  .col-sm-push-3 {
+    left: 25%;
+  }
+  .col-sm-push-2 {
+    left: 16.66666667%;
+  }
+  .col-sm-push-1 {
+    left: 8.33333333%;
+  }
+  .col-sm-push-0 {
+    left: auto;
+  }
+  .col-sm-offset-12 {
+    margin-left: 100%;
+  }
+  .col-sm-offset-11 {
+    margin-left: 91.66666667%;
+  }
+  .col-sm-offset-10 {
+    margin-left: 83.33333333%;
+  }
+  .col-sm-offset-9 {
+    margin-left: 75%;
+  }
+  .col-sm-offset-8 {
+    margin-left: 66.66666667%;
+  }
+  .col-sm-offset-7 {
+    margin-left: 58.33333333%;
+  }
+  .col-sm-offset-6 {
+    margin-left: 50%;
+  }
+  .col-sm-offset-5 {
+    margin-left: 41.66666667%;
+  }
+  .col-sm-offset-4 {
+    margin-left: 33.33333333%;
+  }
+  .col-sm-offset-3 {
+    margin-left: 25%;
+  }
+  .col-sm-offset-2 {
+    margin-left: 16.66666667%;
+  }
+  .col-sm-offset-1 {
+    margin-left: 8.33333333%;
+  }
+  .col-sm-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 992px) {
+  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {
+    float: left;
+  }
+  .col-md-12 {
+    width: 100%;
+  }
+  .col-md-11 {
+    width: 91.66666667%;
+  }
+  .col-md-10 {
+    width: 83.33333333%;
+  }
+  .col-md-9 {
+    width: 75%;
+  }
+  .col-md-8 {
+    width: 66.66666667%;
+  }
+  .col-md-7 {
+    width: 58.33333333%;
+  }
+  .col-md-6 {
+    width: 50%;
+  }
+  .col-md-5 {
+    width: 41.66666667%;
+  }
+  .col-md-4 {
+    width: 33.33333333%;
+  }
+  .col-md-3 {
+    width: 25%;
+  }
+  .col-md-2 {
+    width: 16.66666667%;
+  }
+  .col-md-1 {
+    width: 8.33333333%;
+  }
+  .col-md-pull-12 {
+    right: 100%;
+  }
+  .col-md-pull-11 {
+    right: 91.66666667%;
+  }
+  .col-md-pull-10 {
+    right: 83.33333333%;
+  }
+  .col-md-pull-9 {
+    right: 75%;
+  }
+  .col-md-pull-8 {
+    right: 66.66666667%;
+  }
+  .col-md-pull-7 {
+    right: 58.33333333%;
+  }
+  .col-md-pull-6 {
+    right: 50%;
+  }
+  .col-md-pull-5 {
+    right: 41.66666667%;
+  }
+  .col-md-pull-4 {
+    right: 33.33333333%;
+  }
+  .col-md-pull-3 {
+    right: 25%;
+  }
+  .col-md-pull-2 {
+    right: 16.66666667%;
+  }
+  .col-md-pull-1 {
+    right: 8.33333333%;
+  }
+  .col-md-pull-0 {
+    right: auto;
+  }
+  .col-md-push-12 {
+    left: 100%;
+  }
+  .col-md-push-11 {
+    left: 91.66666667%;
+  }
+  .col-md-push-10 {
+    left: 83.33333333%;
+  }
+  .col-md-push-9 {
+    left: 75%;
+  }
+  .col-md-push-8 {
+    left: 66.66666667%;
+  }
+  .col-md-push-7 {
+    left: 58.33333333%;
+  }
+  .col-md-push-6 {
+    left: 50%;
+  }
+  .col-md-push-5 {
+    left: 41.66666667%;
+  }
+  .col-md-push-4 {
+    left: 33.33333333%;
+  }
+  .col-md-push-3 {
+    left: 25%;
+  }
+  .col-md-push-2 {
+    left: 16.66666667%;
+  }
+  .col-md-push-1 {
+    left: 8.33333333%;
+  }
+  .col-md-push-0 {
+    left: auto;
+  }
+  .col-md-offset-12 {
+    margin-left: 100%;
+  }
+  .col-md-offset-11 {
+    margin-left: 91.66666667%;
+  }
+  .col-md-offset-10 {
+    margin-left: 83.33333333%;
+  }
+  .col-md-offset-9 {
+    margin-left: 75%;
+  }
+  .col-md-offset-8 {
+    margin-left: 66.66666667%;
+  }
+  .col-md-offset-7 {
+    margin-left: 58.33333333%;
+  }
+  .col-md-offset-6 {
+    margin-left: 50%;
+  }
+  .col-md-offset-5 {
+    margin-left: 41.66666667%;
+  }
+  .col-md-offset-4 {
+    margin-left: 33.33333333%;
+  }
+  .col-md-offset-3 {
+    margin-left: 25%;
+  }
+  .col-md-offset-2 {
+    margin-left: 16.66666667%;
+  }
+  .col-md-offset-1 {
+    margin-left: 8.33333333%;
+  }
+  .col-md-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 1200px) {
+  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {
+    float: left;
+  }
+  .col-lg-12 {
+    width: 100%;
+  }
+  .col-lg-11 {
+    width: 91.66666667%;
+  }
+  .col-lg-10 {
+    width: 83.33333333%;
+  }
+  .col-lg-9 {
+    width: 75%;
+  }
+  .col-lg-8 {
+    width: 66.66666667%;
+  }
+  .col-lg-7 {
+    width: 58.33333333%;
+  }
+  .col-lg-6 {
+    width: 50%;
+  }
+  .col-lg-5 {
+    width: 41.66666667%;
+  }
+  .col-lg-4 {
+    width: 33.33333333%;
+  }
+  .col-lg-3 {
+    width: 25%;
+  }
+  .col-lg-2 {
+    width: 16.66666667%;
+  }
+  .col-lg-1 {
+    width: 8.33333333%;
+  }
+  .col-lg-pull-12 {
+    right: 100%;
+  }
+  .col-lg-pull-11 {
+    right: 91.66666667%;
+  }
+  .col-lg-pull-10 {
+    right: 83.33333333%;
+  }
+  .col-lg-pull-9 {
+    right: 75%;
+  }
+  .col-lg-pull-8 {
+    right: 66.66666667%;
+  }
+  .col-lg-pull-7 {
+    right: 58.33333333%;
+  }
+  .col-lg-pull-6 {
+    right: 50%;
+  }
+  .col-lg-pull-5 {
+    right: 41.66666667%;
+  }
+  .col-lg-pull-4 {
+    right: 33.33333333%;
+  }
+  .col-lg-pull-3 {
+    right: 25%;
+  }
+  .col-lg-pull-2 {
+    right: 16.66666667%;
+  }
+  .col-lg-pull-1 {
+    right: 8.33333333%;
+  }
+  .col-lg-pull-0 {
+    right: auto;
+  }
+  .col-lg-push-12 {
+    left: 100%;
+  }
+  .col-lg-push-11 {
+    left: 91.66666667%;
+  }
+  .col-lg-push-10 {
+    left: 83.33333333%;
+  }
+  .col-lg-push-9 {
+    left: 75%;
+  }
+  .col-lg-push-8 {
+    left: 66.66666667%;
+  }
+  .col-lg-push-7 {
+    left: 58.33333333%;
+  }
+  .col-lg-push-6 {
+    left: 50%;
+  }
+  .col-lg-push-5 {
+    left: 41.66666667%;
+  }
+  .col-lg-push-4 {
+    left: 33.33333333%;
+  }
+  .col-lg-push-3 {
+    left: 25%;
+  }
+  .col-lg-push-2 {
+    left: 16.66666667%;
+  }
+  .col-lg-push-1 {
+    left: 8.33333333%;
+  }
+  .col-lg-push-0 {
+    left: auto;
+  }
+  .col-lg-offset-12 {
+    margin-left: 100%;
+  }
+  .col-lg-offset-11 {
+    margin-left: 91.66666667%;
+  }
+  .col-lg-offset-10 {
+    margin-left: 83.33333333%;
+  }
+  .col-lg-offset-9 {
+    margin-left: 75%;
+  }
+  .col-lg-offset-8 {
+    margin-left: 66.66666667%;
+  }
+  .col-lg-offset-7 {
+    margin-left: 58.33333333%;
+  }
+  .col-lg-offset-6 {
+    margin-left: 50%;
+  }
+  .col-lg-offset-5 {
+    margin-left: 41.66666667%;
+  }
+  .col-lg-offset-4 {
+    margin-left: 33.33333333%;
+  }
+  .col-lg-offset-3 {
+    margin-left: 25%;
+  }
+  .col-lg-offset-2 {
+    margin-left: 16.66666667%;
+  }
+  .col-lg-offset-1 {
+    margin-left: 8.33333333%;
+  }
+  .col-lg-offset-0 {
+    margin-left: 0;
+  }
+}
+table {
+  background-color: transparent;
+}
+th {
+  text-align: left;
+}
+.table {
+  width: 100%;
+  max-width: 100%;
+  margin-bottom: 20px;
+}
+.table > thead > tr > th,
+.table > tbody > tr > th,
+.table > tfoot > tr > th,
+.table > thead > tr > td,
+.table > tbody > tr > td,
+.table > tfoot > tr > td {
+  padding: 8px;
+  line-height: 1.42857143;
+  vertical-align: top;
+  border-top: 1px solid #ddd;
+}
+.table > thead > tr > th {
+  vertical-align: bottom;
+  border-bottom: 2px solid #ddd;
+}
+.table > caption + thead > tr:first-child > th,
+.table > colgroup + thead > tr:first-child > th,
+.table > thead:first-child > tr:first-child > th,
+.table > caption + thead > tr:first-child > td,
+.table > colgroup + thead > tr:first-child > td,
+.table > thead:first-child > tr:first-child > td {
+  border-top: 0;
+}
+.table > tbody + tbody {
+  border-top: 2px solid #ddd;
+}
+.table .table {
+  background-color: #fff;
+}
+.table-condensed > thead > tr > th,
+.table-condensed > tbody > tr > th,
+.table-condensed > tfoot > tr > th,
+.table-condensed > thead > tr > td,
+.table-condensed > tbody > tr > td,
+.table-condensed > tfoot > tr > td {
+  padding: 5px;
+}
+.table-bordered {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > tbody > tr > th,
+.table-bordered > tfoot > tr > th,
+.table-bordered > thead > tr > td,
+.table-bordered > tbody > tr > td,
+.table-bordered > tfoot > tr > td {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > thead > tr > td {
+  border-bottom-width: 2px;
+}
+.table-striped > tbody > tr:nth-child(odd) > td,
+.table-striped > tbody > tr:nth-child(odd) > th {
+  background-color: #f9f9f9;
+}
+.table-hover > tbody > tr:hover > td,
+.table-hover > tbody > tr:hover > th {
+  background-color: #f5f5f5;
+}
+table col[class*="col-"] {
+  position: static;
+  display: table-column;
+  float: none;
+}
+table td[class*="col-"],
+table th[class*="col-"] {
+  position: static;
+  display: table-cell;
+  float: none;
+}
+.table > thead > tr > td.active,
+.table > tbody > tr > td.active,
+.table > tfoot > tr > td.active,
+.table > thead > tr > th.active,
+.table > tbody > tr > th.active,
+.table > tfoot > tr > th.active,
+.table > thead > tr.active > td,
+.table > tbody > tr.active > td,
+.table > tfoot > tr.active > td,
+.table > thead > tr.active > th,
+.table > tbody > tr.active > th,
+.table > tfoot > tr.active > th {
+  background-color: #f5f5f5;
+}
+.table-hover > tbody > tr > td.active:hover,
+.table-hover > tbody > tr > th.active:hover,
+.table-hover > tbody > tr.active:hover > td,
+.table-hover > tbody > tr:hover > .active,
+.table-hover > tbody > tr.active:hover > th {
+  background-color: #e8e8e8;
+}
+.table > thead > tr > td.success,
+.table > tbody > tr > td.success,
+.table > tfoot > tr > td.success,
+.table > thead > tr > th.success,
+.table > tbody > tr > th.success,
+.table > tfoot > tr > th.success,
+.table > thead > tr.success > td,
+.table > tbody > tr.success > td,
+.table > tfoot > tr.success > td,
+.table > thead > tr.success > th,
+.table > tbody > tr.success > th,
+.table > tfoot > tr.success > th {
+  background-color: #dff0d8;
+}
+.table-hover > tbody > tr > td.success:hover,
+.table-hover > tbody > tr > th.success:hover,
+.table-hover > tbody > tr.success:hover > td,
+.table-hover > tbody > tr:hover > .success,
+.table-hover > tbody > tr.success:hover > th {
+  background-color: #d0e9c6;
+}
+.table > thead > tr > td.info,
+.table > tbody > tr > td.info,
+.table > tfoot > tr > td.info,
+.table > thead > tr > th.info,
+.table > tbody > tr > th.info,
+.table > tfoot > tr > th.info,
+.table > thead > tr.info > td,
+.table > tbody > tr.info > td,
+.table > tfoot > tr.info > td,
+.table > thead > tr.info > th,
+.table > tbody > tr.info > th,
+.table > tfoot > tr.info > th {
+  background-color: #d9edf7;
+}
+.table-hover > tbody > tr > td.info:hover,
+.table-hover > tbody > tr > th.info:hover,
+.table-hover > tbody > tr.info:hover > td,
+.table-hover > tbody > tr:hover > .info,
+.table-hover > tbody > tr.info:hover > th {
+  background-color: #c4e3f3;
+}
+.table > thead > tr > td.warning,
+.table > tbody > tr > td.warning,
+.table > tfoot > tr > td.warning,
+.table > thead > tr > th.warning,
+.table > tbody > tr > th.warning,
+.table > tfoot > tr > th.warning,
+.table > thead > tr.warning > td,
+.table > tbody > tr.warning > td,
+.table > tfoot > tr.warning > td,
+.table > thead > tr.warning > th,
+.table > tbody > tr.warning > th,
+.table > tfoot > tr.warning > th {
+  background-color: #fcf8e3;
+}
+.table-hover > tbody > tr > td.warning:hover,
+.table-hover > tbody > tr > th.warning:hover,
+.table-hover > tbody > tr.warning:hover > td,
+.table-hover > tbody > tr:hover > .warning,
+.table-hover > tbody > tr.warning:hover > th {
+  background-color: #faf2cc;
+}
+.table > thead > tr > td.danger,
+.table > tbody > tr > td.danger,
+.table > tfoot > tr > td.danger,
+.table > thead > tr > th.danger,
+.table > tbody > tr > th.danger,
+.table > tfoot > tr > th.danger,
+.table > thead > tr.danger > td,
+.table > tbody > tr.danger > td,
+.table > tfoot > tr.danger > td,
+.table > thead > tr.danger > th,
+.table > tbody > tr.danger > th,
+.table > tfoot > tr.danger > th {
+  background-color: #f2dede;
+}
+.table-hover > tbody > tr > td.danger:hover,
+.table-hover > tbody > tr > th.danger:hover,
+.table-hover > tbody > tr.danger:hover > td,
+.table-hover > tbody > tr:hover > .danger,
+.table-hover > tbody > tr.danger:hover > th {
+  background-color: #ebcccc;
+}
+@media screen and (max-width: 767px) {
+  .table-responsive {
+    width: 100%;
+    margin-bottom: 15px;
+    overflow-x: auto;
+    overflow-y: hidden;
+    -webkit-overflow-scrolling: touch;
+    -ms-overflow-style: -ms-autohiding-scrollbar;
+    border: 1px solid #ddd;
+  }
+  .table-responsive > .table {
+    margin-bottom: 0;
+  }
+  .table-responsive > .table > thead > tr > th,
+  .table-responsive > .table > tbody > tr > th,
+  .table-responsive > .table > tfoot > tr > th,
+  .table-responsive > .table > thead > tr > td,
+  .table-responsive > .table > tbody > tr > td,
+  .table-responsive > .table > tfoot > tr > td {
+    white-space: nowrap;
+  }
+  .table-responsive > .table-bordered {
+    border: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:first-child,
+  .table-responsive > .table-bordered > tbody > tr > th:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+  .table-responsive > .table-bordered > thead > tr > td:first-child,
+  .table-responsive > .table-bordered > tbody > tr > td:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+    border-left: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:last-child,
+  .table-responsive > .table-bordered > tbody > tr > th:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+  .table-responsive > .table-bordered > thead > tr > td:last-child,
+  .table-responsive > .table-bordered > tbody > tr > td:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+    border-right: 0;
+  }
+  .table-responsive > .table-bordered > tbody > tr:last-child > th,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+  .table-responsive > .table-bordered > tbody > tr:last-child > td,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+    border-bottom: 0;
+  }
+}
+fieldset {
+  min-width: 0;
+  padding: 0;
+  margin: 0;
+  border: 0;
+}
+legend {
+  display: block;
+  width: 100%;
+  padding: 0;
+  margin-bottom: 20px;
+  font-size: 21px;
+  line-height: inherit;
+  color: #333;
+  border: 0;
+  border-bottom: 1px solid #e5e5e5;
+}
+label {
+  display: inline-block;
+  max-width: 100%;
+  margin-bottom: 5px;
+  font-weight: bold;
+}
+input[type="search"] {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+input[type="radio"],
+input[type="checkbox"] {
+  margin: 4px 0 0;
+  margin-top: 1px \9;
+  line-height: normal;
+}
+input[type="file"] {
+  display: block;
+}
+input[type="range"] {
+  display: block;
+  width: 100%;
+}
+select[multiple],
+select[size] {
+  height: auto;
+}
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+output {
+  display: block;
+  padding-top: 7px;
+  font-size: 14px;
+  line-height: 1.42857143;
+  color: #555;
+}
+.form-control {
+  display: block;
+  width: 100%;
+  height: 34px;
+  padding: 6px 12px;
+  font-size: 14px;
+  line-height: 1.42857143;
+  color: #555;
+  background-color: #fff;
+  background-image: none;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;
+       -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+}
+.form-control:focus {
+  border-color: #66afe9;
+  outline: 0;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+}
+.form-control::-moz-placeholder {
+  color: #777;
+  opacity: 1;
+}
+.form-control:-ms-input-placeholder {
+  color: #777;
+}
+.form-control::-webkit-input-placeholder {
+  color: #777;
+}
+.form-control[disabled],
+.form-control[readonly],
+fieldset[disabled] .form-control {
+  cursor: not-allowed;
+  background-color: #eee;
+  opacity: 1;
+}
+textarea.form-control {
+  height: auto;
+}
+input[type="search"] {
+  -webkit-appearance: none;
+}
+input[type="date"],
+input[type="time"],
+input[type="datetime-local"],
+input[type="month"] {
+  line-height: 34px;
+  line-height: 1.42857143 \0;
+}
+input[type="date"].input-sm,
+input[type="time"].input-sm,
+input[type="datetime-local"].input-sm,
+input[type="month"].input-sm {
+  line-height: 30px;
+}
+input[type="date"].input-lg,
+input[type="time"].input-lg,
+input[type="datetime-local"].input-lg,
+input[type="month"].input-lg {
+  line-height: 46px;
+}
+.form-group {
+  margin-bottom: 15px;
+}
+.radio,
+.checkbox {
+  position: relative;
+  display: block;
+  min-height: 20px;
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+.radio label,
+.checkbox label {
+  padding-left: 20px;
+  margin-bottom: 0;
+  font-weight: normal;
+  cursor: pointer;
+}
+.radio input[type="radio"],
+.radio-inline input[type="radio"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+  position: absolute;
+  margin-top: 4px \9;
+  margin-left: -20px;
+}
+.radio + .radio,
+.checkbox + .checkbox {
+  margin-top: -5px;
+}
+.radio-inline,
+.checkbox-inline {
+  display: inline-block;
+  padding-left: 20px;
+  margin-bottom: 0;
+  font-weight: normal;
+  vertical-align: middle;
+  cursor: pointer;
+}
+.radio-inline + .radio-inline,
+.checkbox-inline + .checkbox-inline {
+  margin-top: 0;
+  margin-left: 10px;
+}
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+input[type="radio"].disabled,
+input[type="checkbox"].disabled,
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"] {
+  cursor: not-allowed;
+}
+.radio-inline.disabled,
+.checkbox-inline.disabled,
+fieldset[disabled] .radio-inline,
+fieldset[disabled] .checkbox-inline {
+  cursor: not-allowed;
+}
+.radio.disabled label,
+.checkbox.disabled label,
+fieldset[disabled] .radio label,
+fieldset[disabled] .checkbox label {
+  cursor: not-allowed;
+}
+.form-control-static {
+  padding-top: 7px;
+  padding-bottom: 7px;
+  margin-bottom: 0;
+}
+.form-control-static.input-lg,
+.form-control-static.input-sm {
+  padding-right: 0;
+  padding-left: 0;
+}
+.input-sm,
+.form-horizontal .form-group-sm .form-control {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+select.input-sm {
+  height: 30px;
+  line-height: 30px;
+}
+textarea.input-sm,
+select[multiple].input-sm {
+  height: auto;
+}
+.input-lg,
+.form-horizontal .form-group-lg .form-control {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+select.input-lg {
+  height: 46px;
+  line-height: 46px;
+}
+textarea.input-lg,
+select[multiple].input-lg {
+  height: auto;
+}
+.has-feedback {
+  position: relative;
+}
+.has-feedback .form-control {
+  padding-right: 42.5px;
+}
+.form-control-feedback {
+  position: absolute;
+  top: 25px;
+  right: 0;
+  z-index: 2;
+  display: block;
+  width: 34px;
+  height: 34px;
+  line-height: 34px;
+  text-align: center;
+}
+.input-lg + .form-control-feedback {
+  width: 46px;
+  height: 46px;
+  line-height: 46px;
+}
+.input-sm + .form-control-feedback {
+  width: 30px;
+  height: 30px;
+  line-height: 30px;
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline {
+  color: #3c763d;
+}
+.has-success .form-control {
+  border-color: #3c763d;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-success .form-control:focus {
+  border-color: #2b542c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+}
+.has-success .input-group-addon {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #3c763d;
+}
+.has-success .form-control-feedback {
+  color: #3c763d;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline {
+  color: #8a6d3b;
+}
+.has-warning .form-control {
+  border-color: #8a6d3b;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-warning .form-control:focus {
+  border-color: #66512c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+}
+.has-warning .input-group-addon {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #8a6d3b;
+}
+.has-warning .form-control-feedback {
+  color: #8a6d3b;
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline {
+  color: #a94442;
+}
+.has-error .form-control {
+  border-color: #a94442;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-error .form-control:focus {
+  border-color: #843534;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+}
+.has-error .input-group-addon {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #a94442;
+}
+.has-error .form-control-feedback {
+  color: #a94442;
+}
+.has-feedback label.sr-only ~ .form-control-feedback {
+  top: 0;
+}
+.help-block {
+  display: block;
+  margin-top: 5px;
+  margin-bottom: 10px;
+  color: #737373;
+}
+@media (min-width: 768px) {
+  .form-inline .form-group {
+    display: inline-block;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .form-control {
... 56708 lines suppressed ...