Posted to commits@tvm.apache.org by tq...@apache.org on 2021/11/25 00:20:12 UTC

[tvm-site] branch asf-site updated: Build at Wed Nov 24 16:20:02 PST 2021

This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/tvm-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 39536b9  Build at Wed Nov 24 16:20:02 PST 2021
39536b9 is described below

commit 39536b9ee9f9d14df5efb43df38bb67fe512425e
Author: tqchen <tq...@octoml.ai>
AuthorDate: Wed Nov 24 16:20:03 2021 -0800

    Build at Wed Nov 24 16:20:02 PST 2021
---
 .gitignore                                         |   3 ++
 2017/08/17/tvm-release-announcement.html           |   2 +-
 ...s-with-TVM-A-Depthwise-Convolution-Example.html |   2 +-
 2017/10/06/nnvm-compiler-announcement.html         |   2 +-
 ...s-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html |   2 +-
 2017/11/08/android-rpc-introduction.html           |   2 +-
 2018/01/16/opt-mali-gpu.html                       |   2 +-
 2018/03/12/webgl.html                              |   2 +-
 2018/03/23/nmt-transformer-optimize.html           |   2 +-
 2018/07/12/vta-release-announcement.html           |   2 +-
 2018/08/10/DLPack-Bridge.html                      |   2 +-
 2018/10/03/auto-opt-all.html                       |   2 +-
 2018/10/09/ml-in-tees.html                         |   2 +-
 2018/12/18/lowprecision-conv.html                  |   2 +-
 2019/01/19/Golang.html                             |   2 +-
 2019/03/18/tvm-apache-announcement.html            |   2 +-
 2019/04/29/opt-cuda-quantized.html                 |   2 +-
 2019/05/30/pytorch-frontend.html                   |   2 +-
 ...machine-learning-to-webassembly-and-webgpu.html |   2 +-
 2020/06/04/tinyml-how-tvm-is-taming-tiny.html      |   2 +-
 2020/07/14/bert-pytorch-tvm.html                   |   2 +-
 .../15/how-to-bring-your-own-codegen-to-tvm.html   |   2 +-
 2020/09/26/bring-your-own-datatypes.html           |   2 +-
 2021/03/03/intro-auto-scheduler.html               |   2 +-
 atom.xml                                           |  42 ++++++++++----------
 community.html                                     |   4 ++
 download.html                                      |   6 +++
 feed.xml                                           |  20 +++++-----
 images/community/simaai.png                        | Bin 0 -> 37282 bytes
 rss.xml                                            |  44 ++++++++++-----------
 30 files changed, 89 insertions(+), 76 deletions(-)

diff --git a/.gitignore b/.gitignore
index cf6401d..72a3cf5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,6 @@ website.tgz
 .jekyll-cache
 docs.tgz
 Gemfile.lock
+.bundle/
+vendor/
+
diff --git a/2017/08/17/tvm-release-announcement.html b/2017/08/17/tvm-release-announcement.html
index ea95cf0..dbd65e1 100644
--- a/2017/08/17/tvm-release-announcement.html
+++ b/2017/08/17/tvm-release-announcement.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>TVM: An End to End IR Stack for Deploying Deep Learning Workloads on Hardware Platforms </h1>
       <p class="post-meta">
-        <time datetime="2017-08-17T15:00:00-04:00" itemprop="datePublished">
+        <time datetime="2017-08-17T12:00:00-07:00" itemprop="datePublished">
           Aug 17, 2017
         </time>
         
diff --git a/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html b/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
index 96b2e16..13a15a3 100644
--- a/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
+++ b/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Optimize Deep Learning GPU Operators with TVM: A Depthwise Convolution Example </h1>
       <p class="post-meta">
-        <time datetime="2017-08-22T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2017-08-22T00:00:00-07:00" itemprop="datePublished">
           Aug 22, 2017
         </time>
         
diff --git a/2017/10/06/nnvm-compiler-announcement.html b/2017/10/06/nnvm-compiler-announcement.html
index 40557e0..b627ca6 100644
--- a/2017/10/06/nnvm-compiler-announcement.html
+++ b/2017/10/06/nnvm-compiler-announcement.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>NNVM Compiler: Open Compiler for AI Frameworks </h1>
       <p class="post-meta">
-        <time datetime="2017-10-06T11:30:00-04:00" itemprop="datePublished">
+        <time datetime="2017-10-06T08:30:00-07:00" itemprop="datePublished">
           Oct 6, 2017
         </time>
         
diff --git a/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html b/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
index 06f20bd..e6a6c2f 100644
--- a/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
+++ b/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Bringing AMDGPUs to TVM Stack and NNVM Compiler with ROCm </h1>
       <p class="post-meta">
-        <time datetime="2017-10-30T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2017-10-30T00:00:00-07:00" itemprop="datePublished">
           Oct 30, 2017
         </time>
         
diff --git a/2017/11/08/android-rpc-introduction.html b/2017/11/08/android-rpc-introduction.html
index 7d15d82..f7e34b5 100644
--- a/2017/11/08/android-rpc-introduction.html
+++ b/2017/11/08/android-rpc-introduction.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Remote Profile and Test Deep Learning Cross Compilation on Mobile Phones with TVM RPC </h1>
       <p class="post-meta">
-        <time datetime="2017-11-08T00:00:00-05:00" itemprop="datePublished">
+        <time datetime="2017-11-08T00:00:00-08:00" itemprop="datePublished">
           Nov 8, 2017
         </time>
         
diff --git a/2018/01/16/opt-mali-gpu.html b/2018/01/16/opt-mali-gpu.html
index a039779..40fc7f0 100644
--- a/2018/01/16/opt-mali-gpu.html
+++ b/2018/01/16/opt-mali-gpu.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Optimizing Mobile Deep Learning on ARM GPU with TVM </h1>
       <p class="post-meta">
-        <time datetime="2018-01-16T00:00:00-05:00" itemprop="datePublished">
+        <time datetime="2018-01-16T00:00:00-08:00" itemprop="datePublished">
           Jan 16, 2018
         </time>
         
diff --git a/2018/03/12/webgl.html b/2018/03/12/webgl.html
index 792c922..74313b5 100644
--- a/2018/03/12/webgl.html
+++ b/2018/03/12/webgl.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Compiling Deep Learning Models to WebGL with TVM </h1>
       <p class="post-meta">
-        <time datetime="2018-03-12T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-03-12T00:00:00-07:00" itemprop="datePublished">
           Mar 12, 2018
         </time>
         
diff --git a/2018/03/23/nmt-transformer-optimize.html b/2018/03/23/nmt-transformer-optimize.html
index 2182327..35c211a 100644
--- a/2018/03/23/nmt-transformer-optimize.html
+++ b/2018/03/23/nmt-transformer-optimize.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Bringing TVM into TensorFlow for Optimizing Neural Machine Translation on GPU </h1>
       <p class="post-meta">
-        <time datetime="2018-03-23T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-03-23T00:00:00-07:00" itemprop="datePublished">
           Mar 23, 2018
         </time>
         
diff --git a/2018/07/12/vta-release-announcement.html b/2018/07/12/vta-release-announcement.html
index c60a3e1..1250749 100644
--- a/2018/07/12/vta-release-announcement.html
+++ b/2018/07/12/vta-release-announcement.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>VTA: An Open, Customizable Deep Learning Acceleration Stack  </h1>
       <p class="post-meta">
-        <time datetime="2018-07-12T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-07-12T00:00:00-07:00" itemprop="datePublished">
           Jul 12, 2018
         </time>
         
diff --git a/2018/08/10/DLPack-Bridge.html b/2018/08/10/DLPack-Bridge.html
index 7ec1aaa..af4d193 100644
--- a/2018/08/10/DLPack-Bridge.html
+++ b/2018/08/10/DLPack-Bridge.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Building a Cross-Framework Deep Learning Compiler via DLPack </h1>
       <p class="post-meta">
-        <time datetime="2018-08-10T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-08-10T00:00:00-07:00" itemprop="datePublished">
           Aug 10, 2018
         </time>
         
diff --git a/2018/10/03/auto-opt-all.html b/2018/10/03/auto-opt-all.html
index 98269c7..ac36190 100644
--- a/2018/10/03/auto-opt-all.html
+++ b/2018/10/03/auto-opt-all.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Automatic Kernel Optimization for Deep Learning on All Hardware Platforms </h1>
       <p class="post-meta">
-        <time datetime="2018-10-03T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-10-03T00:00:00-07:00" itemprop="datePublished">
           Oct 3, 2018
         </time>
         
diff --git a/2018/10/09/ml-in-tees.html b/2018/10/09/ml-in-tees.html
index 992e1a3..0f59a69 100644
--- a/2018/10/09/ml-in-tees.html
+++ b/2018/10/09/ml-in-tees.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Efficient Privacy-Preserving ML Using TVM </h1>
       <p class="post-meta">
-        <time datetime="2018-10-09T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2018-10-09T00:00:00-07:00" itemprop="datePublished">
           Oct 9, 2018
         </time>
         
diff --git a/2018/12/18/lowprecision-conv.html b/2018/12/18/lowprecision-conv.html
index c5def47..f32251d 100644
--- a/2018/12/18/lowprecision-conv.html
+++ b/2018/12/18/lowprecision-conv.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Automating Generation of Low Precision Deep Learning Operators </h1>
       <p class="post-meta">
-        <time datetime="2018-12-18T00:00:00-05:00" itemprop="datePublished">
+        <time datetime="2018-12-18T00:00:00-08:00" itemprop="datePublished">
           Dec 18, 2018
         </time>
         
diff --git a/2019/01/19/Golang.html b/2019/01/19/Golang.html
index 27a39f0..6b8b94a 100644
--- a/2019/01/19/Golang.html
+++ b/2019/01/19/Golang.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>TVM Golang Runtime for Deep Learning Deployment </h1>
       <p class="post-meta">
-        <time datetime="2019-01-19T00:00:00-05:00" itemprop="datePublished">
+        <time datetime="2019-01-19T00:00:00-08:00" itemprop="datePublished">
           Jan 19, 2019
         </time>
         
diff --git a/2019/03/18/tvm-apache-announcement.html b/2019/03/18/tvm-apache-announcement.html
index 386de84..19b5017 100644
--- a/2019/03/18/tvm-apache-announcement.html
+++ b/2019/03/18/tvm-apache-announcement.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>TVM Deep Learning Compiler Joins Apache Software Foundation </h1>
       <p class="post-meta">
-        <time datetime="2019-03-18T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2019-03-18T00:00:00-07:00" itemprop="datePublished">
           Mar 18, 2019
         </time>
         
diff --git a/2019/04/29/opt-cuda-quantized.html b/2019/04/29/opt-cuda-quantized.html
index 3b401af..1c55a9a 100644
--- a/2019/04/29/opt-cuda-quantized.html
+++ b/2019/04/29/opt-cuda-quantized.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Automating Optimization of Quantized Deep Learning Models on CUDA </h1>
       <p class="post-meta">
-        <time datetime="2019-04-29T12:00:00-04:00" itemprop="datePublished">
+        <time datetime="2019-04-29T09:00:00-07:00" itemprop="datePublished">
           Apr 29, 2019
         </time>
         
diff --git a/2019/05/30/pytorch-frontend.html b/2019/05/30/pytorch-frontend.html
index ad8281b..a4dd9a3 100644
--- a/2019/05/30/pytorch-frontend.html
+++ b/2019/05/30/pytorch-frontend.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Integrating TVM into PyTorch </h1>
       <p class="post-meta">
-        <time datetime="2019-05-30T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2019-05-30T00:00:00-07:00" itemprop="datePublished">
           May 30, 2019
         </time>
         
diff --git a/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html b/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
index 38bd956..50f01e7 100644
--- a/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
+++ b/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Compiling Machine Learning to WASM and WebGPU with Apache TVM </h1>
       <p class="post-meta">
-        <time datetime="2020-05-14T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2020-05-14T00:00:00-07:00" itemprop="datePublished">
           May 14, 2020
         </time>
         
diff --git a/2020/06/04/tinyml-how-tvm-is-taming-tiny.html b/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
index bcb1aed..ec640c7 100644
--- a/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
+++ b/2020/06/04/tinyml-how-tvm-is-taming-tiny.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>TinyML - How TVM is Taming Tiny </h1>
       <p class="post-meta">
-        <time datetime="2020-06-04T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2020-06-04T00:00:00-07:00" itemprop="datePublished">
           Jun 4, 2020
         </time>
         
diff --git a/2020/07/14/bert-pytorch-tvm.html b/2020/07/14/bert-pytorch-tvm.html
index a563504..387e219 100644
--- a/2020/07/14/bert-pytorch-tvm.html
+++ b/2020/07/14/bert-pytorch-tvm.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Bridging PyTorch and TVM </h1>
       <p class="post-meta">
-        <time datetime="2020-07-14T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2020-07-14T00:00:00-07:00" itemprop="datePublished">
           Jul 14, 2020
         </time>
         
diff --git a/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html b/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
index 1e16544..f6b24c2 100644
--- a/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
+++ b/2020/07/15/how-to-bring-your-own-codegen-to-tvm.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>How to Bring Your Own Codegen to TVM </h1>
       <p class="post-meta">
-        <time datetime="2020-07-15T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2020-07-15T00:00:00-07:00" itemprop="datePublished">
           Jul 15, 2020
         </time>
         
diff --git a/2020/09/26/bring-your-own-datatypes.html b/2020/09/26/bring-your-own-datatypes.html
index 0dc4fb0..135d0db 100644
--- a/2020/09/26/bring-your-own-datatypes.html
+++ b/2020/09/26/bring-your-own-datatypes.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Bring Your Own Datatypes: Enabling Custom Datatype Exploration in TVM </h1>
       <p class="post-meta">
-        <time datetime="2020-09-26T00:00:00-04:00" itemprop="datePublished">
+        <time datetime="2020-09-26T00:00:00-07:00" itemprop="datePublished">
           Sep 26, 2020
         </time>
         
diff --git a/2021/03/03/intro-auto-scheduler.html b/2021/03/03/intro-auto-scheduler.html
index 10d944c..208511e 100644
--- a/2021/03/03/intro-auto-scheduler.html
+++ b/2021/03/03/intro-auto-scheduler.html
@@ -140,7 +140,7 @@
     <div class="span14 w-100">
       <h1>Introducing TVM Auto-scheduler (a.k.a. Ansor) </h1>
       <p class="post-meta">
-        <time datetime="2021-03-03T00:00:00-05:00" itemprop="datePublished">
+        <time datetime="2021-03-03T00:00:00-08:00" itemprop="datePublished">
           Mar 3, 2021
         </time>
         
diff --git a/atom.xml b/atom.xml
index d413e62..097924c 100644
--- a/atom.xml
+++ b/atom.xml
@@ -4,7 +4,7 @@
  <title>TVM</title>
  <link href="https://tvm.apache.org" rel="self"/>
  <link href="https://tvm.apache.org"/>
- <updated>2021-07-29T18:32:17-04:00</updated>
+ <updated>2021-11-24T16:19:59-08:00</updated>
  <id>https://tvm.apache.org</id>
  <author>
    <name></name>
@@ -15,7 +15,7 @@
  <entry>
    <title>Introducing TVM Auto-scheduler (a.k.a. Ansor)</title>
    <link href="https://tvm.apache.org/2021/03/03/intro-auto-scheduler"/>
-   <updated>2021-03-03T00:00:00-05:00</updated>
+   <updated>2021-03-03T00:00:00-08:00</updated>
    <id>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</id>
    <content type="html">&lt;p&gt;Optimizing the execution speed of deep neural networks is extremely hard with the growing
 model size, operator diversity, and hardware heterogeneity.
@@ -145,7 +145,7 @@ sparse operators, low-precision operators, and dynamic shape better.&lt;/p&gt;
  <entry>
    <title>Bring Your Own Datatypes: Enabling Custom Datatype Exploration in TVM</title>
    <link href="https://tvm.apache.org/2020/09/26/bring-your-own-datatypes"/>
-   <updated>2020-09-26T00:00:00-04:00</updated>
+   <updated>2020-09-26T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</id>
    <content type="html">&lt;p&gt;In this post, we describe the Bring Your Own Datatypes framework, which enables the use of custom datatypes within TVM.&lt;/p&gt;
 
@@ -438,7 +438,7 @@ For more documentation about the Bring Your Own Datatypes framework
  <entry>
    <title>How to Bring Your Own Codegen to TVM</title>
    <link href="https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm"/>
-   <updated>2020-07-15T00:00:00-04:00</updated>
+   <updated>2020-07-15T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</id>
    <content type="html">&lt;p&gt;To free data scientists from worrying about the performance when developing a new model, hardware backend providers (e.g., Intel, NVIDIA, ARM, etc) either provide kernel libraries such as cuBLAS or cuDNN with many commonly used deep learning kernels, or provide frameworks such as DNNL or TensorRT with a graph engine to let users describe their models in a certain way to achieve high performance. In addition, emerging deep learning accelerators also have t [...]
 
@@ -917,7 +917,7 @@ Figure 4: After Graph Partitioning.
  <entry>
    <title>Bridging PyTorch and TVM</title>
    <link href="https://tvm.apache.org/2020/07/14/bert-pytorch-tvm"/>
-   <updated>2020-07-14T00:00:00-04:00</updated>
+   <updated>2020-07-14T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</id>
    <content type="html">
 &lt;p&gt;(A more code-heavy variant is crossposted on the more PyTorch affine &lt;a href=&quot;https://lernapparat.de/transformers-pytorch-tvm/&quot;&gt;Lernapparat&lt;/a&gt;,
@@ -1440,7 +1440,7 @@ He is a PyTorch core developer and co-authored &lt;a href=&quot;https://www.mann
  <entry>
    <title>TinyML - How TVM is Taming Tiny</title>
    <link href="https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny"/>
-   <updated>2020-06-04T00:00:00-04:00</updated>
+   <updated>2020-06-04T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</id>
    <content type="html">
 &lt;p&gt;&lt;img src=&quot;/images/microtvm/logo.png&quot; alt=&quot;microTVM logo&quot; width=&quot;30%&quot; /&gt;&lt;br /&gt;&lt;/p&gt;
@@ -1749,7 +1749,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
  <entry>
    <title>Compiling Machine Learning to WASM and WebGPU with Apache TVM</title>
    <link href="https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu"/>
-   <updated>2020-05-14T00:00:00-04:00</updated>
+   <updated>2020-05-14T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</id>
    <content type="html">&lt;p&gt;&lt;strong&gt;TLDR&lt;/strong&gt;&lt;/p&gt;
 
@@ -1836,7 +1836,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
  <entry>
    <title>Integrating TVM into PyTorch</title>
    <link href="https://tvm.apache.org/2019/05/30/pytorch-frontend"/>
-   <updated>2019-05-30T00:00:00-04:00</updated>
+   <updated>2019-05-30T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2019/05/30/pytorch-frontend</id>
    <content type="html">&lt;p&gt;As TVM continuously demonstrates improvements to the efficiency of deep learning execution,
 it has become clear that PyTorch stands to benefit from directly leveraging the compiler stack.
@@ -1938,7 +1938,7 @@ relay_graph = torch_tvm.to_relay(mul, inputs)
  <entry>
    <title>Automating Optimization of Quantized Deep Learning Models on CUDA</title>
    <link href="https://tvm.apache.org/2019/04/29/opt-cuda-quantized"/>
-   <updated>2019-04-29T12:00:00-04:00</updated>
+   <updated>2019-04-29T09:00:00-07:00</updated>
    <id>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</id>
    <content type="html">&lt;p&gt;Deep learning has been successfully applied to a variety of tasks.
 On real-time scenarios such as inference on autonomous vehicles, the inference speed of the model is critical.
@@ -2082,7 +2082,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
  <entry>
    <title>TVM Deep Learning Compiler Joins Apache Software Foundation</title>
    <link href="https://tvm.apache.org/2019/03/18/tvm-apache-announcement"/>
-   <updated>2019-03-18T00:00:00-04:00</updated>
+   <updated>2019-03-18T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</id>
    <content type="html">&lt;p&gt;There is an increasing need to bring machine learning to a wide diversity of hardware devices. Current frameworks rely on vendor-specific operator libraries and optimize for a narrow range of server-class GPUs. Deploying workloads to new platforms – such as mobile phones, embedded devices, and accelerators (e.g., FPGAs, ASICs) – requires significant manual effort.&lt;/p&gt;
 
@@ -2105,7 +2105,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
  <entry>
    <title>TVM Golang Runtime for Deep Learning Deployment</title>
    <link href="https://tvm.apache.org/2019/01/19/Golang"/>
-   <updated>2019-01-19T00:00:00-05:00</updated>
+   <updated>2019-01-19T00:00:00-08:00</updated>
    <id>https://tvm.apache.org/2019/01/19/Golang</id>
    <content type="html">&lt;h2 id=&quot;introduction&quot;&gt;Introduction&lt;/h2&gt;
 
@@ -2275,7 +2275,7 @@ closure as TVM packed function and invoke the same across programming language b
  <entry>
    <title>Automating Generation of Low Precision Deep Learning Operators</title>
    <link href="https://tvm.apache.org/2018/12/18/lowprecision-conv"/>
-   <updated>2018-12-18T00:00:00-05:00</updated>
+   <updated>2018-12-18T00:00:00-08:00</updated>
    <id>https://tvm.apache.org/2018/12/18/lowprecision-conv</id>
    <content type="html">&lt;p&gt;As deep learning models grow larger and more complex, deploying them on low powered phone and IoT
 devices becomes challenging because of their limited compute and energy budgets. A  recent  trend
@@ -2436,7 +2436,7 @@ Note: x86 doesn’t support a vectorized popcount for this microarchitecture, so
  <entry>
    <title>Efficient Privacy-Preserving ML Using TVM</title>
    <link href="https://tvm.apache.org/2018/10/09/ml-in-tees"/>
-   <updated>2018-10-09T00:00:00-04:00</updated>
+   <updated>2018-10-09T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/10/09/ml-in-tees</id>
    <content type="html">&lt;p&gt;This post describes Myelin, a framework for privacy-preserving machine learning in trusted hardware enclaves, and how TVM makes Myelin fast.
 The key idea is that TVM, unlike other popular ML frameworks, compiles models into lightweight, optimized, and dependency-free libraries which can fit into resource constrained enclaves.&lt;/p&gt;
@@ -2552,7 +2552,7 @@ His research interest is in the general domain of ML on shared private data, but
  <entry>
    <title>Automatic Kernel Optimization for Deep Learning on All Hardware Platforms</title>
    <link href="https://tvm.apache.org/2018/10/03/auto-opt-all"/>
-   <updated>2018-10-03T00:00:00-04:00</updated>
+   <updated>2018-10-03T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/10/03/auto-opt-all</id>
    <content type="html">&lt;p&gt;Optimizing the performance of deep neural network on a diverse range of hardware platforms is still a hard
 problem for AI developers. In terms of system support, we are facing a many-to-many problem here:
@@ -2946,7 +2946,7 @@ for inference deployment. TVM just provides such a solution.&lt;/p&gt;
  <entry>
    <title>Building a Cross-Framework Deep Learning Compiler via DLPack</title>
    <link href="https://tvm.apache.org/2018/08/10/DLPack-Bridge"/>
-   <updated>2018-08-10T00:00:00-04:00</updated>
+   <updated>2018-08-10T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/08/10/DLPack-Bridge</id>
    <content type="html">&lt;p&gt;Deep learning frameworks such as Tensorflow, PyTorch, and ApacheMxNet provide a
 powerful toolbox for quickly prototyping and deploying deep learning models.
@@ -3085,7 +3085,7 @@ support, and can be used to implement convenient converters, such as
  <entry>
    <title>VTA: An Open, Customizable Deep Learning Acceleration Stack </title>
    <link href="https://tvm.apache.org/2018/07/12/vta-release-announcement"/>
-   <updated>2018-07-12T00:00:00-04:00</updated>
+   <updated>2018-07-12T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/07/12/vta-release-announcement</id>
    <content type="html">&lt;p style=&quot;text-align: center&quot;&gt;Thierry Moreau(VTA architect), Tianqi Chen(TVM stack), Ziheng Jiang†(graph compilation), Luis Vega(cloud deployment)&lt;/p&gt;
 &lt;p style=&quot;text-align: center&quot;&gt;Advisors: Luis Ceze, Carlos Guestrin, Arvind Krishnamurthy&lt;/p&gt;
@@ -3227,7 +3227,7 @@ This kind of high-level visibility is essential to system designers who want to
  <entry>
    <title>Bringing TVM into TensorFlow for Optimizing Neural Machine Translation on GPU</title>
    <link href="https://tvm.apache.org/2018/03/23/nmt-transformer-optimize"/>
-   <updated>2018-03-23T00:00:00-04:00</updated>
+   <updated>2018-03-23T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</id>
    <content type="html">&lt;h2 id=&quot;author&quot;&gt;Author&lt;/h2&gt;
 
@@ -3493,7 +3493,7 @@ C = tvm.compute(
  <entry>
    <title>Compiling Deep Learning Models to WebGL with TVM</title>
    <link href="https://tvm.apache.org/2018/03/12/webgl"/>
-   <updated>2018-03-12T00:00:00-04:00</updated>
+   <updated>2018-03-12T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2018/03/12/webgl</id>
    <content type="html">&lt;p&gt;Now TVM comes with a brand-new OpenGL/WebGL backend!
 This blog post explains what it is, and what you can achieve with it.&lt;/p&gt;
@@ -3609,7 +3609,7 @@ optimizations into the TVM stack.&lt;/p&gt;
  <entry>
    <title>Optimizing Mobile Deep Learning on ARM GPU with TVM</title>
    <link href="https://tvm.apache.org/2018/01/16/opt-mali-gpu"/>
-   <updated>2018-01-16T00:00:00-05:00</updated>
+   <updated>2018-01-16T00:00:00-08:00</updated>
    <id>https://tvm.apache.org/2018/01/16/opt-mali-gpu</id>
    <content type="html">&lt;p&gt;With the great success of deep learning, the demand for
 deploying deep neural networks to mobile devices is growing rapidly.
@@ -4183,7 +4183,7 @@ advice and &lt;a href=&quot;https://github.com/yzhliu&quot;&gt;Yizhi Liu&lt;/a&g
  <entry>
    <title>Remote Profile and Test Deep Learning Cross Compilation on Mobile Phones with TVM RPC</title>
    <link href="https://tvm.apache.org/2017/11/08/android-rpc-introduction"/>
-   <updated>2017-11-08T00:00:00-05:00</updated>
+   <updated>2017-11-08T00:00:00-08:00</updated>
    <id>https://tvm.apache.org/2017/11/08/android-rpc-introduction</id>
    <content type="html">&lt;p&gt;TVM stack is an end to end compilation stack to deploy deep learning workloads to all hardware backends.
 Thanks to the NNVM compiler support of TVM stack, we can now directly compile descriptions from deep learning frameworks and compile them to bare metal code.
@@ -4411,7 +4411,7 @@ make jvminstall
  <entry>
    <title>Bringing AMDGPUs to TVM Stack and NNVM Compiler with ROCm</title>
    <link href="https://tvm.apache.org/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm"/>
-   <updated>2017-10-30T00:00:00-04:00</updated>
+   <updated>2017-10-30T00:00:00-07:00</updated>
    <id>https://tvm.apache.org/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm</id>
    <content type="html">&lt;p style=&quot;text-align: center&quot;&gt;Aditya Atluri, Advanced Micro Devices, Inc.&lt;/p&gt;
 &lt;p style=&quot;text-align: center&quot;&gt;Masahiro Masuda, Ziosoft, Inc.&lt;/p&gt;
diff --git a/community.html b/community.html
index 3e42147..3f77b36 100644
--- a/community.html
+++ b/community.html
@@ -299,6 +299,10 @@ This is a community maintained list of organizations using and contributing to t
     </li>
     
     <li>
+        <img src="/images/community/simaai.png" />
+    </li>
+    
+    <li>
         <img src="/images/community/sjtu.png" />
     </li>
     
diff --git a/download.html b/download.html
index ea3637d..10b8821 100644
--- a/download.html
+++ b/download.html
@@ -156,6 +156,12 @@ Choose your flavor of download from the following links:</p>
   </thead>
   <tbody>
     <tr>
+      <td>0.8.0</td>
+      <td><a href="https://dist.apache.org/repos/dist/release/tvm/tvm-v0.8.0/apache-tvm-src-v0.8.0.tar.gz">apache-tvm-src-v0.8.0.tar.gz</a></td>
+      <td><a href="https://dist.apache.org/repos/dist/release/tvm/tvm-v0.8.0/apache-tvm-src-v0.8.0.tar.gz.asc">.asc</a></td>
+      <td><a href="https://dist.apache.org/repos/dist/release/tvm/tvm-v0.8.0/apache-tvm-src-v0.8.0.tar.gz.sha512">.sha512</a></td>
+    </tr>
+    <tr>
       <td>0.7.0</td>
       <td><a href="https://dist.apache.org/repos/dist/release/tvm/tvm-v0.7.0/apache-tvm-src-v0.7.0-incubating.tar.gz">apache-tvm-src-v0.7.0-incubating.tar.gz</a></td>
       <td><a href="https://dist.apache.org/repos/dist/release/tvm/tvm-v0.7.0/apache-tvm-src-v0.7.0-incubating.tar.gz.asc">.asc</a></td>
diff --git a/feed.xml b/feed.xml
index 1031968..ca1b8b3 100644
--- a/feed.xml
+++ b/feed.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?><feed xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/" version="4.1.1">Jekyll</generator><link href="/feed.xml" rel="self" type="application/atom+xml" /><link href="/" rel="alternate" type="text/html" /><updated>2021-07-29T18:32:17-04:00</updated><id>/feed.xml</id><title type="html">TVM</title><author><name>{&quot;name&quot;=&gt;nil}</name></author><entry><title type="html">Introducing TVM Auto-scheduler (a.k.a. Ansor)</tit [...]
+<?xml version="1.0" encoding="utf-8"?><feed xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/" version="4.1.1">Jekyll</generator><link href="/feed.xml" rel="self" type="application/atom+xml" /><link href="/" rel="alternate" type="text/html" /><updated>2021-11-24T16:19:59-08:00</updated><id>/feed.xml</id><title type="html">TVM</title><author><name>{&quot;name&quot;=&gt;nil}</name></author><entry><title type="html">Introducing TVM Auto-scheduler (a.k.a. Ansor)</tit [...]
 model size, operator diversity, and hardware heterogeneity.
 From a computational perspective, deep neural networks are just layers and layers of tensor computations.
 These tensor computations, such as matmul and conv2d, can be easily described by mathematical expressions.
@@ -118,7 +118,7 @@ sparse operators, low-precision operators, and dynamic shape better.&lt;/p&gt;
 &lt;p&gt;[1] Tutorials: &lt;a href=&quot;https://tvm.apache.org/docs/tutorials/index.html#autoscheduler-template-free-auto-scheduling&quot;&gt;https://tvm.apache.org/docs/tutorials/index.html#autoscheduler-template-free-auto-scheduling&lt;/a&gt;&lt;br /&gt;
 [2] Benchmark repo: &lt;a href=&quot;https://github.com/tlc-pack/TLCBench&quot;&gt;https://github.com/tlc-pack/TLCBench&lt;/a&gt;&lt;br /&gt;
 [3] OSDI Paper: &lt;a href=&quot;https://arxiv.org/abs/2006.06762&quot;&gt;Ansor : Generating High-Performance Tensor Programs for Deep Learning&lt;/a&gt;&lt;br /&gt;
-[4] Results on Apple M1 chip: &lt;a href=&quot;https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d&quot;&gt;https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d&lt;/a&gt;.&lt;/p&gt;</content><author><name>Lianmin Zheng, Chengfan Jia, Minmin Sun, Zhao Wu, Cody Hao Yu</name></author><summary type="html">Optimizing the execution speed of deep neural networks i [...]
+[4] Results on Apple M1 chip: &lt;a href=&quot;https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d&quot;&gt;https://medium.com/octoml/on-the-apple-m1-beating-apples-core-ml-4-with-30-model-performance-improvements-9d94af7d1b2d&lt;/a&gt;.&lt;/p&gt;</content><author><name>Lianmin Zheng, Chengfan Jia, Minmin Sun, Zhao Wu, Cody Hao Yu</name></author><summary type="html">Optimizing the execution speed of deep neural networks i [...]
 
 &lt;h2 id=&quot;introduction&quot;&gt;Introduction&lt;/h2&gt;
 
@@ -402,7 +402,7 @@ For more documentation about the Bring Your Own Datatypes framework
       &lt;p&gt;&lt;a href=&quot;https://posithub.org/docs/BeatingFloatingPoint.pdf&quot; target=&quot;_blank&quot;&gt;Beating Floating Point at its Own Game: Posit Arithmetic&lt;/a&gt; &lt;a href=&quot;#fnref:posit&quot; class=&quot;reversefootnote&quot; role=&quot;doc-backlink&quot;&gt;&amp;#8617;&lt;/a&gt;&lt;/p&gt;
     &lt;/li&gt;
   &lt;/ol&gt;
-&lt;/div&gt;</content><author><name>Gus Smith, Andrew Liu</name></author><summary type="html">In this post, we describe the Bring Your Own Datatypes framework, which enables the use of custom datatypes within TVM.</summary></entry><entry><title type="html">How to Bring Your Own Codegen to TVM</title><link href="/2020/07/15/how-to-bring-your-own-codegen-to-tvm" rel="alternate" type="text/html" title="How to Bring Your Own Codegen to TVM" /><published>2020-07-15T00:00:00-04:00</published>< [...]
+&lt;/div&gt;</content><author><name>Gus Smith, Andrew Liu</name></author><summary type="html">In this post, we describe the Bring Your Own Datatypes framework, which enables the use of custom datatypes within TVM.</summary></entry><entry><title type="html">How to Bring Your Own Codegen to TVM</title><link href="/2020/07/15/how-to-bring-your-own-codegen-to-tvm" rel="alternate" type="text/html" title="How to Bring Your Own Codegen to TVM" /><published>2020-07-15T00:00:00-07:00</published>< [...]
 
 &lt;p&gt;However, users have to learn a new programming interface when they attempt to work on a new kernel library or a device. As a result, the demand for a unified programming interface becomes more and more important to let all users and hardware backend providers stand on the same page.&lt;/p&gt;
 
@@ -871,7 +871,7 @@ Figure 4: After Graph Partitioning.
 
 &lt;h2 id=&quot;acknowledgment&quot;&gt;Acknowledgment&lt;/h2&gt;
 
-&lt;p&gt;We would like to thank our colleague Animesh Jain for valuable discussions in the framework design; Tianqi Chen and Jared Roesch from OctoML for system design discussions and prototyping; Masahiro Masuda from the TVM community to help code review and improve the DNNL integration. We would also like to thank Ramana Radhakrishnan, Matthew Barrett, Manupa Karunaratne, and Luke Hutton from ARM, U.K. for contributing several helpful ideas, related Relay passes, and the Arm Compute Li [...]
+&lt;p&gt;We would like to thank our colleague Animesh Jain for valuable discussions in the framework design; Tianqi Chen and Jared Roesch from OctoML for system design discussions and prototyping; Masahiro Masuda from the TVM community to help code review and improve the DNNL integration. We would also like to thank Ramana Radhakrishnan, Matthew Barrett, Manupa Karunaratne, and Luke Hutton from ARM, U.K. for contributing several helpful ideas, related Relay passes, and the Arm Compute Li [...]
  the Jupyter Notebook to follow along is on &lt;a href=&quot;https://github.com/t-vi/pytorch-tvmisc/tree/master/transformers-pytorch-tvm/&quot;&gt;github&lt;/a&gt;.)&lt;/p&gt;
 
 &lt;p&gt;Some of the most intriguing applications of Artificial Intelligence have been in Natural Language Processing.
@@ -1384,7 +1384,7 @@ one would want to re-do cheap computation, most prominently point-wise computati
 &lt;h1 id=&quot;author&quot;&gt;Author&lt;/h1&gt;
 
 &lt;p&gt;&lt;a href=&quot;https://lernapparat.de/&quot;&gt;Thomas Viehmann&lt;/a&gt; is the founder of &lt;a href=&quot;https://mathinf.eu/&quot;&gt;MathInf GmbH&lt;/a&gt;, Munich, Germany, a boutique training and consultancy firm focusing on Machine Learning and PyTorch.
-He is a PyTorch core developer and co-authored &lt;a href=&quot;https://www.manning.com/books/deep-learning-with-pytorch&quot;&gt;Deep Learning with PyTorch&lt;/a&gt;, which currently available as &lt;a href=&quot;https://pytorch.org/deep-learning-with-pytorch&quot;&gt;free download from the PyTorch website&lt;/a&gt;.&lt;/p&gt;</content><author><name>Thomas Viehmann, MathInf GmbH</name></author><summary type="html"></summary></entry><entry><title type="html">TinyML - How TVM is Taming Ti [...]
+He is a PyTorch core developer and co-authored &lt;a href=&quot;https://www.manning.com/books/deep-learning-with-pytorch&quot;&gt;Deep Learning with PyTorch&lt;/a&gt;, which currently available as &lt;a href=&quot;https://pytorch.org/deep-learning-with-pytorch&quot;&gt;free download from the PyTorch website&lt;/a&gt;.&lt;/p&gt;</content><author><name>Thomas Viehmann, MathInf GmbH</name></author><summary type="html"></summary></entry><entry><title type="html">TinyML - How TVM is Taming Ti [...]
 
 &lt;p&gt;The proliferation of low-cost, AI-powered consumer devices has led to widespread interest in “bare-metal” (low-power, often without an operating system) devices among ML researchers and practitioners.  While it is already possible for experts to run &lt;em&gt;some&lt;/em&gt; models on &lt;em&gt;some&lt;/em&gt; bare-metal devices, optimizing models for diverse sets of devices is challenging, often requiring manually optimized device-specific libraries.  And for those platforms wi [...]
 
@@ -1683,7 +1683,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
   &lt;li&gt;&lt;a href=&quot;https://homes.cs.washington.edu/~moreau/&quot;&gt;Thierry Moreau&lt;/a&gt;, for mentoring me during my time at OctoML.&lt;/li&gt;
   &lt;li&gt;&lt;a href=&quot;https://homes.cs.washington.edu/~vegaluis/&quot;&gt;Luis Vega&lt;/a&gt;, for teaching me the fundamentals of interacting with microcontrollers.&lt;/li&gt;
   &lt;li&gt;&lt;a href=&quot;https://www.linkedin.com/in/themadrasi/?originalSubdomain=uk&quot;&gt;Ramana Radhakrishnan&lt;/a&gt;, for supplying the Arm hardware used in our experiments and for providing guidance on its usage.&lt;/li&gt;
-&lt;/ul&gt;</content><author><name>Logan Weber and Andrew Reusch, OctoML</name></author><summary type="html"></summary></entry><entry><title type="html">Compiling Machine Learning to WASM and WebGPU with Apache TVM</title><link href="/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu" rel="alternate" type="text/html" title="Compiling Machine Learning to WASM and WebGPU with Apache TVM" /><published>2020-05-14T00:00:00-04:00</published><updated>2020-05-14T00:00:00-04:00</upd [...]
+&lt;/ul&gt;</content><author><name>Logan Weber and Andrew Reusch, OctoML</name></author><summary type="html"></summary></entry><entry><title type="html">Compiling Machine Learning to WASM and WebGPU with Apache TVM</title><link href="/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu" rel="alternate" type="text/html" title="Compiling Machine Learning to WASM and WebGPU with Apache TVM" /><published>2020-05-14T00:00:00-07:00</published><updated>2020-05-14T00:00:00-07:00</upd [...]
 
 &lt;p&gt;We introduced support for WASM and WebGPU to the Apache TVM deep learning compiler. Our experiments shows that  TVM’s WebGPU backend can get &lt;strong&gt;close to native&lt;/strong&gt; &lt;strong&gt;GPU performance&lt;/strong&gt; when deploying models to the web.&lt;/p&gt;
 
@@ -1761,7 +1761,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
 
 &lt;h2 id=&quot;acknowledgement&quot;&gt;Acknowledgement&lt;/h2&gt;
 
-&lt;p&gt;We would like to thank the emscripten project for providing the WASM compilation infrastructures as well as the JS library support on the web. We would also like to thank the WebGPU community for various helpful discussions. Thanks to Fletcher Haynes for valuable feedbacks to the post.&lt;/p&gt;</content><author><name>Tianqi Chen and Jared Roesch, OctoML</name></author><summary type="html">TLDR</summary></entry><entry><title type="html">Integrating TVM into PyTorch</title><link  [...]
+&lt;p&gt;We would like to thank the emscripten project for providing the WASM compilation infrastructures as well as the JS library support on the web. We would also like to thank the WebGPU community for various helpful discussions. Thanks to Fletcher Haynes for valuable feedbacks to the post.&lt;/p&gt;</content><author><name>Tianqi Chen and Jared Roesch, OctoML</name></author><summary type="html">TLDR</summary></entry><entry><title type="html">Integrating TVM into PyTorch</title><link  [...]
 it has become clear that PyTorch stands to benefit from directly leveraging the compiler stack.
 A major tenet of PyTorch is providing seamless and robust integrations that don’t get in the user’s way.
 To that end, PyTorch now has an official TVM-based backend, &lt;a href=&quot;https://github.com/pytorch/tvm&quot;&gt;torch_tvm&lt;/a&gt;.&lt;/p&gt;
@@ -1853,7 +1853,7 @@ def mul(a, b, c):
 
 # via script
 relay_graph = torch_tvm.to_relay(mul, inputs)
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;</content><author><name>Bram Wasti</name></author><summary type="html">As TVM continuously demonstrates improvements to the efficiency of deep learning execution, it has become clear that PyTorch stands to benefit from directly leveraging the compiler stack. A major tenet of PyTorch is providing seamless and robust integrations that don’t get in the user’s way. To that end, PyTorch now has an official TVM-based backend, torch_tvm.</summary [...]
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;</content><author><name>Bram Wasti</name></author><summary type="html">As TVM continuously demonstrates improvements to the efficiency of deep learning execution, it has become clear that PyTorch stands to benefit from directly leveraging the compiler stack. A major tenet of PyTorch is providing seamless and robust integrations that don’t get in the user’s way. To that end, PyTorch now has an official TVM-based backend, torch_tvm.</summary [...]
 On real-time scenarios such as inference on autonomous vehicles, the inference speed of the model is critical.
 Network quantization is an effective approach to accelerating deep learning models.
 In quantized models, both data and model parameters are represented with low precision data types such as &lt;code class=&quot;language-plaintext highlighter-rouge&quot;&gt;int8&lt;/code&gt; and &lt;code class=&quot;language-plaintext highlighter-rouge&quot;&gt;float16&lt;/code&gt;.
@@ -1988,7 +1988,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
 &lt;/ul&gt;
 
 &lt;h1 id=&quot;bio--acknowledgement&quot;&gt;Bio &amp;amp; Acknowledgement&lt;/h1&gt;
-&lt;p&gt;&lt;a href=&quot;https://wuwei.io/&quot;&gt;Wuwei Lin&lt;/a&gt; is an undergraduate student at SJTU. He is currently an intern at TuSimple. The author has many thanks to &lt;a href=&quot;https://homes.cs.washington.edu/~tqchen/&quot;&gt;Tianqi Chen&lt;/a&gt; and &lt;a href=&quot;https://homes.cs.washington.edu/~eqy/&quot;&gt;Eddie Yan&lt;/a&gt; for their reviews.&lt;/p&gt;</content><author><name>Wuwei Lin</name></author><summary type="html">Deep learning has been successfully ap [...]
+&lt;p&gt;&lt;a href=&quot;https://wuwei.io/&quot;&gt;Wuwei Lin&lt;/a&gt; is an undergraduate student at SJTU. He is currently an intern at TuSimple. The author has many thanks to &lt;a href=&quot;https://homes.cs.washington.edu/~tqchen/&quot;&gt;Tianqi Chen&lt;/a&gt; and &lt;a href=&quot;https://homes.cs.washington.edu/~eqy/&quot;&gt;Eddie Yan&lt;/a&gt; for their reviews.&lt;/p&gt;</content><author><name>Wuwei Lin</name></author><summary type="html">Deep learning has been successfully ap [...]
 
 &lt;p&gt;TVM is an open source deep learning compiler stack that closes the gap between the productivity-focused deep learning frameworks, and the performance- or efficiency-oriented hardware backends. Today, we are glad to announce that the TVM community has decided to move on to Apache incubator, and becomes an Apache(incubating) project.&lt;/p&gt;
 
@@ -2002,7 +2002,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
 
 &lt;p&gt;We would like to take this chance to thank the Allen School for supporting the SAMPL team that gave birth to the TVM project. We would also like to thank the Halide project which provided the basis for TVM’s loop-level IR and initial code generation. We would like to thank our Apache incubator mentors for introducing the project to Apache and providing useful guidance. Finally, we would like to thank the TVM community and all of the organizations, as listed above, that supported [...]
 
-&lt;p&gt;See also the &lt;a href=&quot;https://news.cs.washington.edu/2019/03/18/allen-schools-tvm-deep-learning-compiler-framework-transitions-to-apache/&quot;&gt;Allen School news about the transition here&lt;/a&gt;, &lt;a href=&quot;https://sampl.cs.washington.edu/tvmconf/#about-tvmconf&quot;&gt;TVM conference program slides and recordings&lt;/a&gt;, and &lt;a href=&quot;https://tvm.apache.org/docs//contribute/community.html&quot;&gt;our community guideline here&lt;/a&gt;. Follow us o [...]
+&lt;p&gt;See also the &lt;a href=&quot;https://news.cs.washington.edu/2019/03/18/allen-schools-tvm-deep-learning-compiler-framework-transitions-to-apache/&quot;&gt;Allen School news about the transition here&lt;/a&gt;, &lt;a href=&quot;https://sampl.cs.washington.edu/tvmconf/#about-tvmconf&quot;&gt;TVM conference program slides and recordings&lt;/a&gt;, and &lt;a href=&quot;https://tvm.apache.org/docs//contribute/community.html&quot;&gt;our community guideline here&lt;/a&gt;. Follow us o [...]
 
 &lt;p&gt;TVM is an open deep learning compiler stack to compile various deep learning models from different
 frameworks to CPU, GPU or specialized accelerators.  TVM supports model compilation from a wide range
diff --git a/images/community/simaai.png b/images/community/simaai.png
new file mode 100644
index 0000000..097278b
Binary files /dev/null and b/images/community/simaai.png differ
diff --git a/rss.xml b/rss.xml
index 3aa0a87..1023d16 100644
--- a/rss.xml
+++ b/rss.xml
@@ -5,8 +5,8 @@
         <description>TVM - </description>
         <link>https://tvm.apache.org</link>
         <atom:link href="https://tvm.apache.org" rel="self" type="application/rss+xml" />
-        <lastBuildDate>Thu, 29 Jul 2021 18:32:17 -0400</lastBuildDate>
-        <pubDate>Thu, 29 Jul 2021 18:32:17 -0400</pubDate>
+        <lastBuildDate>Wed, 24 Nov 2021 16:19:59 -0800</lastBuildDate>
+        <pubDate>Wed, 24 Nov 2021 16:19:59 -0800</pubDate>
         <ttl>60</ttl>
 
 
@@ -137,7 +137,7 @@ sparse operators, low-precision operators, and dynamic shape better.&lt;/p&gt;
 </description>
                 <link>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</link>
                 <guid>https://tvm.apache.org/2021/03/03/intro-auto-scheduler</guid>
-                <pubDate>Wed, 03 Mar 2021 00:00:00 -0500</pubDate>
+                <pubDate>Wed, 03 Mar 2021 00:00:00 -0800</pubDate>
         </item>
 
         <item>
@@ -430,7 +430,7 @@ For more documentation about the Bring Your Own Datatypes framework
 </description>
                 <link>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</link>
                 <guid>https://tvm.apache.org/2020/09/26/bring-your-own-datatypes</guid>
-                <pubDate>Sat, 26 Sep 2020 00:00:00 -0400</pubDate>
+                <pubDate>Sat, 26 Sep 2020 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -909,7 +909,7 @@ Figure 4: After Graph Partitioning.
 </description>
                 <link>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</link>
                 <guid>https://tvm.apache.org/2020/07/15/how-to-bring-your-own-codegen-to-tvm</guid>
-                <pubDate>Wed, 15 Jul 2020 00:00:00 -0400</pubDate>
+                <pubDate>Wed, 15 Jul 2020 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -1432,7 +1432,7 @@ He is a PyTorch core developer and co-authored &lt;a href=&quot;https://www.mann
 </description>
                 <link>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</link>
                 <guid>https://tvm.apache.org/2020/07/14/bert-pytorch-tvm</guid>
-                <pubDate>Tue, 14 Jul 2020 00:00:00 -0400</pubDate>
+                <pubDate>Tue, 14 Jul 2020 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -1741,7 +1741,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
 </description>
                 <link>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</link>
                 <guid>https://tvm.apache.org/2020/06/04/tinyml-how-tvm-is-taming-tiny</guid>
-                <pubDate>Thu, 04 Jun 2020 00:00:00 -0400</pubDate>
+                <pubDate>Thu, 04 Jun 2020 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -1828,7 +1828,7 @@ Diagram from CMSIS-NN paper showing a 2x2 matrix multiplication microkernel&lt;/
 </description>
                 <link>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</link>
                 <guid>https://tvm.apache.org/2020/05/14/compiling-machine-learning-to-webassembly-and-webgpu</guid>
-                <pubDate>Thu, 14 May 2020 00:00:00 -0400</pubDate>
+                <pubDate>Thu, 14 May 2020 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -1930,7 +1930,7 @@ relay_graph = torch_tvm.to_relay(mul, inputs)
 </description>
                 <link>https://tvm.apache.org/2019/05/30/pytorch-frontend</link>
                 <guid>https://tvm.apache.org/2019/05/30/pytorch-frontend</guid>
-                <pubDate>Thu, 30 May 2019 00:00:00 -0400</pubDate>
+                <pubDate>Thu, 30 May 2019 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -2074,7 +2074,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
 </description>
                 <link>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</link>
                 <guid>https://tvm.apache.org/2019/04/29/opt-cuda-quantized</guid>
-                <pubDate>Mon, 29 Apr 2019 12:00:00 -0400</pubDate>
+                <pubDate>Mon, 29 Apr 2019 09:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -2097,7 +2097,7 @@ We show that automatic optimization in TVM makes it easy and flexible to support
 </description>
                 <link>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</link>
                 <guid>https://tvm.apache.org/2019/03/18/tvm-apache-announcement</guid>
-                <pubDate>Mon, 18 Mar 2019 00:00:00 -0400</pubDate>
+                <pubDate>Mon, 18 Mar 2019 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -2267,7 +2267,7 @@ closure as TVM packed function and invoke the same across programming language b
 </description>
                 <link>https://tvm.apache.org/2019/01/19/Golang</link>
                 <guid>https://tvm.apache.org/2019/01/19/Golang</guid>
-                <pubDate>Sat, 19 Jan 2019 00:00:00 -0500</pubDate>
+                <pubDate>Sat, 19 Jan 2019 00:00:00 -0800</pubDate>
         </item>
 
         <item>
@@ -2428,7 +2428,7 @@ Note: x86 doesn’t support a vectorized popcount for this microarchitecture, so
 </description>
                 <link>https://tvm.apache.org/2018/12/18/lowprecision-conv</link>
                 <guid>https://tvm.apache.org/2018/12/18/lowprecision-conv</guid>
-                <pubDate>Tue, 18 Dec 2018 00:00:00 -0500</pubDate>
+                <pubDate>Tue, 18 Dec 2018 00:00:00 -0800</pubDate>
         </item>
 
         <item>
@@ -2544,7 +2544,7 @@ His research interest is in the general domain of ML on shared private data, but
 </description>
                 <link>https://tvm.apache.org/2018/10/09/ml-in-tees</link>
                 <guid>https://tvm.apache.org/2018/10/09/ml-in-tees</guid>
-                <pubDate>Tue, 09 Oct 2018 00:00:00 -0400</pubDate>
+                <pubDate>Tue, 09 Oct 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -2938,7 +2938,7 @@ for inference deployment. TVM just provides such a solution.&lt;/p&gt;
 </description>
                 <link>https://tvm.apache.org/2018/10/03/auto-opt-all</link>
                 <guid>https://tvm.apache.org/2018/10/03/auto-opt-all</guid>
-                <pubDate>Wed, 03 Oct 2018 00:00:00 -0400</pubDate>
+                <pubDate>Wed, 03 Oct 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -3077,7 +3077,7 @@ support, and can be used to implement convenient converters, such as
 </description>
                 <link>https://tvm.apache.org/2018/08/10/DLPack-Bridge</link>
                 <guid>https://tvm.apache.org/2018/08/10/DLPack-Bridge</guid>
-                <pubDate>Fri, 10 Aug 2018 00:00:00 -0400</pubDate>
+                <pubDate>Fri, 10 Aug 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -3219,7 +3219,7 @@ This kind of high-level visibility is essential to system designers who want to
 </description>
                 <link>https://tvm.apache.org/2018/07/12/vta-release-announcement</link>
                 <guid>https://tvm.apache.org/2018/07/12/vta-release-announcement</guid>
-                <pubDate>Thu, 12 Jul 2018 00:00:00 -0400</pubDate>
+                <pubDate>Thu, 12 Jul 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -3485,7 +3485,7 @@ C = tvm.compute(
 </description>
                 <link>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</link>
                 <guid>https://tvm.apache.org/2018/03/23/nmt-transformer-optimize</guid>
-                <pubDate>Fri, 23 Mar 2018 00:00:00 -0400</pubDate>
+                <pubDate>Fri, 23 Mar 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -3601,7 +3601,7 @@ optimizations into the TVM stack.&lt;/p&gt;
 </description>
                 <link>https://tvm.apache.org/2018/03/12/webgl</link>
                 <guid>https://tvm.apache.org/2018/03/12/webgl</guid>
-                <pubDate>Mon, 12 Mar 2018 00:00:00 -0400</pubDate>
+                <pubDate>Mon, 12 Mar 2018 00:00:00 -0700</pubDate>
         </item>
 
         <item>
@@ -4175,7 +4175,7 @@ advice and &lt;a href=&quot;https://github.com/yzhliu&quot;&gt;Yizhi Liu&lt;/a&g
 </description>
                 <link>https://tvm.apache.org/2018/01/16/opt-mali-gpu</link>
                 <guid>https://tvm.apache.org/2018/01/16/opt-mali-gpu</guid>
-                <pubDate>Tue, 16 Jan 2018 00:00:00 -0500</pubDate>
+                <pubDate>Tue, 16 Jan 2018 00:00:00 -0800</pubDate>
         </item>
 
         <item>
@@ -4403,7 +4403,7 @@ make jvminstall
 </description>
                 <link>https://tvm.apache.org/2017/11/08/android-rpc-introduction</link>
                 <guid>https://tvm.apache.org/2017/11/08/android-rpc-introduction</guid>
-                <pubDate>Wed, 08 Nov 2017 00:00:00 -0500</pubDate>
+                <pubDate>Wed, 08 Nov 2017 00:00:00 -0800</pubDate>
         </item>
 
         <item>
@@ -4629,7 +4629,7 @@ BB0_6:
 </description>
                 <link>https://tvm.apache.org/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm</link>
                 <guid>https://tvm.apache.org/2017/10/30/Bringing-AMDGPUs-to-TVM-Stack-and-NNVM-Compiler-with-ROCm</guid>
-                <pubDate>Mon, 30 Oct 2017 00:00:00 -0400</pubDate>
+                <pubDate>Mon, 30 Oct 2017 00:00:00 -0700</pubDate>
         </item>